#!/usr/bin/env python

import unittest
import socket

from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
    VppMplsIpBind, VppIpMRoute, VppMRoutePath, \
    MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable, \
    VppMplsLabel, MplsLspMode
from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface

from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded
from scapy.contrib.mpls import MPLS


def verify_filter(capture, sent):
    if len(capture) != len(sent):
        # filter out any IPv6 RAs from the capture;
        # iterate over a copy so removal does not skip elements
        for p in capture[:]:
            if p.haslayer(IPv6):
                capture.remove(p)
    return capture


def verify_mpls_stack(tst, rx, mpls_labels):
    # the rx'd packet has the MPLS label popped
    eth = rx[Ether]
    tst.assertEqual(eth.type, 0x8847)

    rx_mpls = rx[MPLS]

    for ii in range(len(mpls_labels)):
        tst.assertEqual(rx_mpls.label, mpls_labels[ii].value)
        tst.assertEqual(rx_mpls.cos, mpls_labels[ii].exp)
        tst.assertEqual(rx_mpls.ttl, mpls_labels[ii].ttl)

        if ii == len(mpls_labels) - 1:
            tst.assertEqual(rx_mpls.s, 1)
        else:
            # not end of stack
            tst.assertEqual(rx_mpls.s, 0)
        # pop the label to expose the next
        rx_mpls = rx_mpls[MPLS].payload


class TestMPLS(VppTestCase):
    """ MPLS Test Case """

    def setUp(self):
        super(TestMPLS, self).setUp()

        # create 4 pg interfaces
        self.create_pg_interfaces(range(4))

        # setup the interfaces and assign them to different tables
        table_id = 0
        self.tables = []

        tbl = VppMplsTable(self, 0)
        tbl.add_vpp_config()
        self.tables.append(tbl)

        for i in self.pg_interfaces:
            i.admin_up()

            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()
                self.tables.append(tbl)
                tbl = VppIpTable(self, table_id, is_ip6=1)
                tbl.add_vpp_config()
                self.tables.append(tbl)

            i.set_table_ip4(table_id)
            i.set_table_ip6(table_id)
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            i.enable_mpls()
            table_id += 1

    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.ip6_disable()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.disable_mpls()
            i.admin_down()
        super(TestMPLS, self).tearDown()

    # the default ip_ttl of 64 matches the IP packet TTL default
    def create_stream_labelled_ip4(self, src_if, mpls_labels,
                                   ping=0, ip_itf=None, dst_ip=None,
                                   chksum=None, ip_ttl=64, n=257):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, n):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)

            for ii in range(len(mpls_labels)):
                p = p / MPLS(label=mpls_labels[ii].value,
                             ttl=mpls_labels[ii].ttl,
                             cos=mpls_labels[ii].exp)
            if not ping:
                if not dst_ip:
                    p = (p / IP(src=src_if.local_ip4,
                                dst=src_if.remote_ip4, ttl=ip_ttl) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
                else:
                    p = (p / IP(src=src_if.local_ip4,
                                dst=dst_ip, ttl=ip_ttl) /
                         UDP(sport=1234, dport=1234) /
                         Raw(payload))
            else:
                p = (p / IP(src=ip_itf.remote_ip4,
                            dst=ip_itf.local_ip4, ttl=ip_ttl) /
                     ICMP())

            if chksum:
                p[IP].chksum = chksum
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip4(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IP(src=src_if.remote_ip4, dst=dst_ip,
                    ttl=ip_ttl, tos=ip_dscp) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_ip6(self, src_if, dst_ip, ip_ttl=64, ip_dscp=0):
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
                 IPv6(src=src_if.remote_ip6, dst=dst_ip,
                      hlim=ip_ttl, tc=ip_dscp) /
                 UDP(sport=1234, dport=1234) /
                 Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def create_stream_labelled_ip6(self, src_if, mpls_labels,
                                   hlim=64, dst_ip=None):
        if dst_ip is None:
            dst_ip = src_if.remote_ip6
        self.reset_packet_infos()
        pkts = []
        for i in range(0, 257):
            info = self.create_packet_info(src_if, src_if)
            payload = self.info_to_payload(info)
            p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
            for l in mpls_labels:
                p = p / MPLS(label=l.value, ttl=l.ttl, cos=l.exp)
            p = p / (IPv6(src=src_if.remote_ip6, dst=dst_ip, hlim=hlim) /
                     UDP(sport=1234, dport=1234) /
                     Raw(payload))
            info.data = p.copy()
            pkts.append(p)
        return pkts

    def verify_capture_ip4(self, src_if, capture, sent,
                           ping_resp=0, ip_ttl=None, ip_dscp=0):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x800)

                tx_ip = tx[IP]
                rx_ip = rx[IP]

                if not ping_resp:
                    self.assertEqual(rx_ip.src, tx_ip.src)
                    self.assertEqual(rx_ip.dst, tx_ip.dst)
                    self.assertEqual(rx_ip.tos, ip_dscp)
                    if not ip_ttl:
                        # IP processing post pop has decremented the TTL
                        self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
                    else:
                        self.assertEqual(rx_ip.ttl, ip_ttl)
                else:
                    self.assertEqual(rx_ip.src, tx_ip.dst)
                    self.assertEqual(rx_ip.dst, tx_ip.src)
        except:
            raise

    def verify_capture_labelled_ip4(self, src_if, capture, sent,
                                    mpls_labels, ip_ttl=None):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IP]
                rx_ip = rx[IP]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                if not ip_ttl:
                    # IP processing post pop has decremented the TTL
                    self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
                else:
                    self.assertEqual(rx_ip.ttl, ip_ttl)
        except:
            raise

    def verify_capture_labelled_ip6(self, src_if, capture, sent,
                                    mpls_labels, ip_ttl=None):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                if not ip_ttl:
                    # IP processing post pop has decremented the TTL
                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
                else:
                    self.assertEqual(rx_ip.hlim, ip_ttl)
        except:
            raise

    def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]
                tx_ip = tx[IP]
                rx_ip = rx[IP]

                verify_mpls_stack(self, rx, mpls_labels)

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                # IP processing post pop has decremented the TTL
                self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
        except:
            raise

    def verify_capture_labelled(self, src_if, capture, sent, mpls_labels):
        try:
            capture = verify_filter(capture, sent)

            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                rx = capture[i]
                verify_mpls_stack(self, rx, mpls_labels)
        except:
            raise

    def verify_capture_ip6(self, src_if, capture, sent,
                           ip_hlim=None, ip_dscp=0):
        try:
            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x86DD)

                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                self.assertEqual(rx_ip.src, tx_ip.src)
                self.assertEqual(rx_ip.dst, tx_ip.dst)
                self.assertEqual(rx_ip.tc, ip_dscp)
                # IP processing post pop has decremented the hop-limit
                if not ip_hlim:
                    self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
                else:
                    self.assertEqual(rx_ip.hlim, ip_hlim)
        except:
            raise

    def verify_capture_ip6_icmp(self, src_if, capture, sent):
        try:
            self.assertEqual(len(capture), len(sent))

            for i in range(len(capture)):
                tx = sent[i]
                rx = capture[i]

                # the rx'd packet has the MPLS label popped
                eth = rx[Ether]
                self.assertEqual(eth.type, 0x86DD)

                tx_ip = tx[IPv6]
                rx_ip = rx[IPv6]

                self.assertEqual(rx_ip.dst, tx_ip.src)
                # ICMP sourced from the interface's address
                self.assertEqual(rx_ip.src, src_if.local_ip6)
                # hop-limit reset to 255 for the ICMP packet, then
                # decremented once on the way out
                self.assertEqual(rx_ip.hlim, 254)

                # indexing raises if the ICMPv6 layer is absent
                icmp = rx[ICMPv6TimeExceeded]
        except:
            raise

    def test_swap(self):
        """ MPLS label swap tests """

        #
        # A simple MPLS xconnect - eos label in, label out
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[VppMplsLabel(33)])])
        route_32_eos.add_vpp_config()

        #
        # a stream that matches the route for 10.0.0.1
        # PG0 is in the default table
        #
        tx = self.create_stream_labelled_ip4(self.pg0,
                                             [VppMplsLabel(32, ttl=32,
                                                           exp=1)])
        rx = self.send_and_expect(self.pg0, tx, self.pg0)
        self.verify_capture_labelled(self.pg0, rx, tx, [Vpp
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE

#include <vlib/vlib.h>
#include <vnet/bonding/node.h>
#include <lacp/node.h>

/*
 *  LACP State = DETACHED
 */
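/*
 * Each per-state table below is indexed by the mux event number noted in
 * the trailing comments; every entry pairs the action to run with the
 * next mux state to enter.
 */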
static lacp_fsm_state_t lacp_mux_state_detached[] = {
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 0 BEGIN
  {LACP_ACTION_WAITING, LACP_MUX_STATE_WAITING},	// event 1 SELECTED
  {LACP_ACTION_WAITING, LACP_MUX_STATE_WAITING},	// event 2 STANDBY
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 3 UNSELECTED
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 4 READY
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 5 SYNC
};

/*
 *  LACP State = WAITING
 */
static lacp_fsm_state_t lacp_mux_state_waiting[] = {
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 0 BEGIN
  {LACP_ACTION_WAITING, LACP_MUX_STATE_WAITING},	// event 1 SELECTED
  {LACP_ACTION_WAITING, LACP_MUX_STATE_WAITING},	// event 2 STANDBY
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 3 UNSELECTED
  {LACP_ACTION_ATTACHED, LACP_MUX_STATE_ATTACHED},	// event 4 READY
  {LACP_ACTION_WAITING, LACP_MUX_STATE_WAITING},	// event 5 SYNC
};

/*
 *  LACP State = ATTACHED
 */
static lacp_fsm_state_t lacp_mux_state_attached[] = {
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 0 BEGIN
  {LACP_ACTION_ATTACHED, LACP_MUX_STATE_ATTACHED},	// event 1 SELECTED
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 2 STANDBY
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 3 UNSELECTED
  {LACP_ACTION_ATTACHED, LACP_MUX_STATE_ATTACHED},	// event 4 READY
  {LACP_ACTION_COLLECTING_DISTRIBUTING, LACP_MUX_STATE_COLLECTING_DISTRIBUTING},	// event 5 SYNC
};

/*
 *  LACP State = COLLECTING_DISTRIBUTING
 */
static lacp_fsm_state_t lacp_mux_state_collecting_distributing[] = {
  {LACP_ACTION_DETACHED, LACP_MUX_STATE_DETACHED},	// event 0 BEGIN
  {LACP_ACTION_COLLECTING_DISTRIBUTING, LACP_MUX_STATE_COLLECTING_DISTRIBUTING},	// event 1 SELECTED
  {LACP_ACTION_COLLECTING_DISTRIBUTING, LACP_MUX_STATE_COLLECTING_DISTRIBUTING},	// event 2 STANDBY
  {LACP_ACTION_ATTACHED, LACP_MUX_STATE_ATTACHED},	// event 3 UNSELECTED
  {LACP_ACTION_COLLECTING_DISTRIBUTING, LACP_MUX_STATE_COLLECTING_DISTRIBUTING},	// event 4 READY
  {LACP_ACTION_COLLECTING_DISTRIBUTING, LACP_MUX_STATE_COLLECTING_DISTRIBUTING},	// event 5 SYNC
};

static lacp_fsm_machine_t lacp_mux_fsm_table[] = {
  {lacp_mux_state_detached},
  {lacp_mux_state_waiting},
  {lacp_mux_state_attached},
  {lacp_mux_state_collecting_distributing},
};

lacp_machine_t lacp_mux_machine = {
  lacp_mux_fsm_table,
  lacp_mux_debug_func,
};
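
/*
 * Illustrative sketch only (not the dispatcher this plugin ships): one
 * plausible way a generic dispatch routine consumes the tables above.
 * The member names 'tables', 'debug_func', 'action' and 'next_state' are
 * assumptions inferred from the initializers and the debug function in
 * this file; see the real lacp_machine_t/lacp_fsm_state_t definitions in
 * <lacp/node.h>.
 */
#if 0				/* sketch, intentionally not compiled */
static void
lacp_mux_dispatch_sketch (lacp_machine_t * machine, slave_if_t * sif,
			  int event, int *state)
{
  /* row = current mux state, column = event */
  lacp_fsm_state_t *t = &machine->tables[*state].state_table[event];

  /* trace the transition, then advance to the next state */
  machine->debug_func (sif, event, *state, t);
  *state = t->next_state;
  /* t->action identifies which lacp_mux_action_*() handler runs next */
}
#endif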

static void
lacp_detach_mux_from_aggregator (vlib_main_t * vm, slave_if_t * sif)
{
  sif->actor.state &= ~LACP_STATE_SYNCHRONIZATION;
  sif->ready = 0;
  sif->ready_n = 0;
}

static void
lacp_attach_mux_to_aggregator (vlib_main_t * vm, slave_if_t * sif)
{
  sif->actor.state |= LACP_STATE_SYNCHRONIZATION;
}

int
lacp_mux_action_detached (void *p1, void *p2)
{
  vlib_main_t *vm = p1;
  slave_if_t *sif = p2;
  lacp_main_t *lm = &lacp_main;

  lacp_detach_mux_from_aggregator (vm, sif);
  sif->actor.state &= ~LACP_STATE_COLLECTING;
  bond_disable_collecting_distributing (vm, sif);
  sif->actor.state &= ~LACP_STATE_DISTRIBUTING;
  sif->ntt = 1;
  lacp_start_periodic_timer (lm->vlib_main, sif, 0);

  if (sif->selected == LACP_PORT_SELECTED)
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif,
			   LACP_MUX_EVENT_SELECTED, &sif->mux_state);

  if (sif->selected == LACP_PORT_STANDBY)
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif, LACP_MUX_EVENT_STANDBY,
			   &sif->mux_state);

  return 0;
}

int
lacp_mux_action_attached (void *p1, void *p2)
{
  vlib_main_t *vm = p1;
  slave_if_t *sif = p2;
  lacp_main_t *lm = &lacp_main;

  lacp_attach_mux_to_aggregator (vm, sif);
  sif->actor.state &= ~LACP_STATE_COLLECTING;
  bond_disable_collecting_distributing (vm, sif);
  sif->actor.state &= ~LACP_STATE_DISTRIBUTING;
  sif->ntt = 1;
  lacp_start_periodic_timer (lm->vlib_main, sif, 0);

  if ((sif->selected == LACP_PORT_UNSELECTED) ||
      (sif->selected == LACP_PORT_STANDBY))
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif,
			   LACP_MUX_EVENT_UNSELECTED, &sif->mux_state);

  if ((sif->selected == LACP_PORT_SELECTED) &&
      (sif->partner.state & LACP_STATE_SYNCHRONIZATION))
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif, LACP_MUX_EVENT_SYNC,
			   &sif->mux_state);
  return 0;
}

int
lacp_mux_action_waiting (void *p1, void *p2)
{
  vlib_main_t *vm = p1;
  slave_if_t *sif = p2;
  lacp_main_t *lm = &lacp_main;

  if (!lacp_timer_is_running (sif->wait_while_timer))
    lacp_start_wait_while_timer (lm->vlib_main, sif,
				 LACP_AGGREGATE_WAIT_TIME);

  if ((sif->selected == LACP_PORT_SELECTED) && sif->ready)
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif,
			   LACP_MUX_EVENT_READY, &sif->mux_state);

  if (sif->selected == LACP_PORT_UNSELECTED)
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif,
			   LACP_MUX_EVENT_UNSELECTED, &sif->mux_state);

  return 0;
}

int
lacp_mux_action_collecting_distributing (void *p1, void *p2)
{
  vlib_main_t *vm = p1;
  slave_if_t *sif = p2;
  lacp_main_t *lm = &lacp_main;

  sif->actor.state |= LACP_STATE_SYNCHRONIZATION | LACP_STATE_COLLECTING |
    LACP_STATE_DISTRIBUTING;
  bond_enable_collecting_distributing (vm, sif);
  sif->ntt = 1;
  lacp_start_periodic_timer (lm->vlib_main, sif, 0);
  if ((sif->selected == LACP_PORT_UNSELECTED) ||
      (sif->selected == LACP_PORT_STANDBY) ||
      !(sif->partner.state & LACP_STATE_SYNCHRONIZATION))
    lacp_machine_dispatch (&lacp_mux_machine, vm, sif,
			   LACP_MUX_EVENT_UNSELECTED, &sif->mux_state);


  return 0;
}

static u8 *
format_mux_event (u8 * s, va_list * args)
{
  static lacp_event_struct lacp_mux_event_array[] = {
#define _(b, s, n) {.bit = b, .str = #s, },
    foreach_lacp_mux_event
#undef _
    {.str = NULL}
  };
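  /*
   * Hypothetical expansion, for illustration only: assuming
   * foreach_lacp_mux_event lists entries of the form
   * _(0, BEGIN, ...) _(1, SELECTED, ...) ..., the x-macro above yields
   * {.bit = 0, .str = "BEGIN", }, {.bit = 1, .str = "SELECTED", }, ...
   * terminated by the {.str = NULL} sentinel, so the array can be
   * indexed directly by event number.
   */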
  int e = va_arg (*args, int);
  lacp_event_struct *event_entry = lacp_mux_event_array;

  if (e >= (sizeof (lacp_mux_event_array) / sizeof (*event_entry)))
    s = format (s, "Bad event %d", e);
  else
    s = format (s, "%s", event_entry[e].str);

  return s;
}

void
lacp_mux_debug_func (slave_if_t * sif, int event, int state,
		     lacp_fsm_state_t * transition)
{
  vlib_worker_thread_t *w = vlib_worker_threads + os_get_thread_index ();
  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (e) =
    {
      .format = "%s",
      .format_args = "T4",
    };
  /* *INDENT-ON* */
  struct
  {
    u32 event;
  } *ed = 0;

  ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
  ed->event =
    elog_string (&vlib_global_main.elog_main, "%U-MUX: %U, %U->%U%c",
		 format_vnet_sw_if_index_name, vnet_get_main (),
		 sif->sw_if_index, format_mux_event, event,
		 format_mux_sm_state, state, format_mux_sm_state,
		 transition->next_state, 0);
}

void
lacp_init_mux_machine (vlib_main_t * vm, slave_if_t * sif)
{
  lacp_machine_dispatch (&lacp_mux_machine, vm, sif, LACP_MUX_EVENT_BEGIN,
			 &sif->mux_state);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
e_mpls()
        self.pg1.disable_mpls()
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.set_table_ip4(0)
            i.set_table_ip6(0)
            i.admin_down()
        super(TestMPLSPIC, self).tearDown()

    def test_mpls_ibgp_pic(self):
        """ MPLS iBGP PIC edge convergence

        1) setup many iBGP VPN routes via a pair of iBGP peers.
        2) Check ECMP forwarding to these peers
        3) withdraw the IGP route to one of these peers.
        4) check forwarding continues to the remaining peer
        """

        #
        # IGP+LDP core routes
        #
        core_10_0_0_45 = VppIpRoute(self, "10.0.0.45", 32,
                                    [VppRoutePath(self.pg0.remote_ip4,
                                                  self.pg0.sw_if_index,
                                                  labels=[45])])
        core_10_0_0_45.add_vpp_config()

        core_10_0_0_46 = VppIpRoute(self, "10.0.0.46", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[46])])
        core_10_0_0_46.add_vpp_config()

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath("10.0.0.45",
                                                       0xffffffff,
                                                       labels=[145],
                                                       is_resolve_host=1),
                                          VppRoutePath("10.0.0.46",
                                                       0xffffffff,
                                                       labels=[146],
                                                       is_resolve_host=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg2.local_mac,
                              src=self.pg2.remote_mac) /
                        IP(src=self.pg2.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        #
        # Send the packet stream (one pkt to each VPN route)
        #  - expect a 50-50 split of the traffic
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)

        # not testing the LB hashing algorithm, so we're not concerned
        # with the split ratio, just as long as neither is 0
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # Withdraw one of the IGP routes
        #
        core_10_0_0_46.remove_vpp_config()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.vapi.ppcli("clear trace")
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        #
        # packets should still be forwarded through the remaining peer
        #
        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(64)

        #
        # Add the IGP route back and we return to load-balancing
        #
        core_10_0_0_46.add_vpp_config()

        self.pg2.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0._get_capture(1)
        rx1 = self.pg1._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_ebgp_pic(self):
        """ MPLS eBGP PIC edge convergence

        1) setup many eBGP VPN routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across remaining eBGP
        """

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "192.168.1.%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(self, dst, 32,
                                         [VppRoutePath(self.pg2.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1),
                                          VppRoutePath(self.pg3.remote_ip4,
                                                       0xffffffff,
                                                       nh_table_id=1,
                                                       is_resolve_attached=1)],
                                         table_id=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 32,
                                              ip_table_id=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IP(src=self.pg0.remote_ip4, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface.
        #
        self.pg2.unconfig_ip4()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connected prefix back
        #
        self.pg2.config_ip4()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

    def test_mpls_v6_ebgp_pic(self):
        """ MPLSv6 eBGP PIC edge convergence

        1) setup many eBGP VPNv6 routes via a pair of eBGP peers
        2) Check ECMP forwarding to these peers
        3) withdraw one eBGP path - expect LB across remaining eBGP
        """

        #
        # Lots of VPN routes. We need more than 64 so VPP will build
        # the fast convergence indirection
        #
        vpn_routes = []
        vpn_bindings = []
        pkts = []
        for ii in range(64):
            dst = "3000::%d" % ii
            local_label = 1600 + ii
            vpn_routes.append(VppIpRoute(
                self, dst, 128,
                [VppRoutePath(self.pg2.remote_ip6,
                              0xffffffff,
                              nh_table_id=1,
                              is_resolve_attached=1,
                              proto=DpoProto.DPO_PROTO_IP6),
                 VppRoutePath(self.pg3.remote_ip6,
                              0xffffffff,
                              nh_table_id=1,
                              proto=DpoProto.DPO_PROTO_IP6,
                              is_resolve_attached=1)],
                table_id=1,
                is_ip6=1))
            vpn_routes[ii].add_vpp_config()

            vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128,
                                              ip_table_id=1,
                                              is_ip6=1))
            vpn_bindings[ii].add_vpp_config()

            pkts.append(Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=local_label, ttl=64) /
                        IPv6(src=self.pg0.remote_ip6, dst=dst) /
                        UDP(sport=1234, dport=1234) /
                        Raw('\xa5' * 100))

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))

        #
        # use a test CLI command to stop the FIB walk process; this
        # will prevent the FIB converging the VPN routes and thus allow
        # us to probe the interim (post-fail, pre-converge) state
        #
        self.vapi.ppcli("test fib-walk-process disable")

        #
        # withdraw the connected prefix on the interface,
        # and shut down the interface so the ND cache is flushed.
        #
        self.pg2.unconfig_ip6()
        self.pg2.admin_down()

        #
        # now all packets should be forwarded through the remaining peer
        #
        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # enable the FIB walk process to converge the FIB
        #
        self.vapi.ppcli("test fib-walk-process enable")

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg3.get_capture(len(pkts))

        #
        # put the connected prefix back
        #
        self.pg2.admin_up()
        self.pg2.config_ip6()

        self.pg0.add_stream(pkts)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg2._get_capture(1)
        rx1 = self.pg3._get_capture(1)
        self.assertNotEqual(0, len(rx0))
        self.assertNotEqual(0, len(rx1))


class TestMPLSL2(VppTestCase):
    """ MPLS-L2 """

    def setUp(self):
        super(TestMPLSL2, self).setUp()

        # create 2 pg interfaces
        self.create_pg_interfaces(range(2))

        # create the default MPLS table
        self.tables = []
        tbl = VppMplsTable(self, 0)
        tbl.add_vpp_config()
        self.tables.append(tbl)

        # use pg0 as the core facing interface
        self.pg0.admin_up()
        self.pg0.config_ip4()
        self.pg0.resolve_arp()
        self.pg0.enable_mpls()

        # use the other interface(s) for customer facing L2 links
        for i in self.pg_interfaces[1:]:
            i.admin_up()

    def tearDown(self):
        for i in self.pg_interfaces[1:]:
            i.admin_down()

        self.pg0.disable_mpls()
        self.pg0.unconfig_ip4()
        self.pg0.admin_down()
        super(TestMPLSL2, self).tearDown()

    def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels):
        capture = verify_filter(capture, sent)

        self.assertEqual(len(capture), len(sent))

        for i in range(len(capture)):
            tx = sent[i]
            rx = capture[i]

            # the MPLS TTL is 255 since it enters a new tunnel
            verify_mpls_stack(self, rx, mpls_labels)

            tx_eth = tx[Ether]
            rx_eth = Ether(str(rx[MPLS].payload))

            self.assertEqual(rx_eth.src, tx_eth.src)
            self.assertEqual(rx_eth.dst, tx_eth.dst)

    def test_vpws(self):
        """ Virtual Private Wire Service """

        #
        # Create an MPLS tunnel that pushes 1 label.
        # For Ethernet over MPLS the uniform mode is irrelevant since ttl/cos
        # information is not in the packet, but we test it works anyway
        #
        mpls_tun_1 = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(42, MplsLspMode.UNIFORM)])],
            is_l2=1)
        mpls_tun_1.add_vpp_config()
        mpls_tun_1.admin_up()

        #
        # Create a label entry for 55 that does L2 input to the tunnel
        #
        route_55_eos = VppMplsRoute(
            self, 55, 1,
            [VppRoutePath("0.0.0.0",
                          mpls_tun_1.sw_if_index,
                          is_interface_rx=1,
                          proto=DpoProto.DPO_PROTO_ETHERNET)])
        route_55_eos.add_vpp_config()

        #
        # Cross-connect the tunnel with one of the customer's L2 interfaces
        #
        self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
                                               mpls_tun_1.sw_if_index,
                                               enable=1)
        self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index,
                                               self.pg1.sw_if_index,
                                               enable=1)

        #
        # inject a packet from the core
        #
        pcore = (Ether(dst=self.pg0.local_mac,
                       src=self.pg0.remote_mac) /
                 MPLS(label=55, ttl=64) /
                 Ether(dst="00:00:de:ad:ba:be",
                       src="00:00:de:ad:be:ef") /
                 IP(src="10.10.10.10", dst="11.11.11.11") /
                 UDP(sport=1234, dport=1234) /
                 Raw('\xa5' * 100))

        tx0 = pcore * 65
        rx0 = self.send_and_expect(self.pg0, tx0, self.pg1)
        payload = pcore[MPLS].payload

        self.assertEqual(rx0[0][Ether].dst, payload[Ether].dst)
        self.assertEqual(rx0[0][Ether].src, payload[Ether].src)

        #
        # Inject a packet from the customer/L2 side
        #
        tx1 = pcore[MPLS].payload * 65
        rx1 = self.send_and_expect(self.pg1, tx1, self.pg0)

        self.verify_capture_tunneled_ethernet(rx1, tx1, [VppMplsLabel(42)])

    def test_vpls(self):
        """ Virtual Private LAN Service """

        #
        # Create an L2 MPLS tunnel
        #
        mpls_tun = VppMPLSTunnelInterface(
            self,
            [VppRoutePath(self.pg0.remote_ip4,
                          self.pg0.sw_if_index,
                          labels=[VppMplsLabel(42)])],
            is_l2=1)
        mpls_tun.add_vpp_config()
        mpls_tun.admin_up()

        #
        # Create a label entry for 55 that does L2 input to the tunnel
        #
        route_55_eos = VppMplsRoute(
            self, 55, 1,
            [VppRoutePath("0.0.0.0",
                          mpls_tun.sw_if_index,
                          is_interface_rx=1,
                          proto=DpoProto.DPO_PROTO_ETHERNET)])
        route_55_eos.add_vpp_config()

        #
        # add the tunnel to the customer's bridge-domain
        #
        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
                                             bd_id=1)
        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
                                             bd_id=1)

        #
        # Packet from the customer interface and from the core
        #
        p_cust = (Ether(dst="00:00:de:ad:ba:be",
                        src="00:00:de:ad:be:ef") /
                  IP(src="10.10.10.10", dst="11.11.11.11") /
                  UDP(sport=1234, dport=1234) /
                  Raw('\xa5' * 100))
        p_core = (Ether(src="00:00:de:ad:ba:be",
                        dst="00:00:de:ad:be:ef") /
                  IP(dst="10.10.10.10", src="11.11.11.11") /
                  UDP(sport=1234, dport=1234) /
                  Raw('\xa5' * 100))

        #
        # The BD is learning, so send in one of each packet to learn
        #
        p_core_encap = (Ether(dst=self.pg0.local_mac,
                              src=self.pg0.remote_mac) /
                        MPLS(label=55, ttl=64) /
                        p_core)

        self.pg1.add_stream(p_cust)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
        self.pg0.add_stream(p_core_encap)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        # we've learnt this so expect it to be forwarded
        rx0 = self.pg1.get_capture(1)

        self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst)
        self.assertEqual(rx0[0][Ether].src, p_core[Ether].src)

        #
        # now a stream in each direction
        #
        self.pg1.add_stream(p_cust * 65)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()

        rx0 = self.pg0.get_capture(65)

        self.verify_capture_tunneled_ethernet(rx0, p_cust * 65,
                                              [VppMplsLabel(42)])

        #
        # remove the interfaces from the customer's bridge-domain
        #
        self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index,
                                             bd_id=1, enable=0)
        self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index,
                                             bd_id=1, enable=0)


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)