aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBenoît Ganne <bganne@cisco.com>2019-07-31 14:29:00 +0200
committerNeale Ranns <nranns@cisco.com>2019-08-07 08:56:03 +0000
commit388f418a8e425366190b7fa79dd1d112efdbf149 (patch)
tree2788ea4e9fab6ea9de34dfc984882e3ea4f026b7
parente3282bae27064b3d9788e235db50441fd2fe1a70 (diff)
gbp: add l3out redirect to remote SEP unit test
Type: test Change-Id: I0fafaebbb1f70488e0bc7d21da778d55fe15c3f5 Signed-off-by: Benoît Ganne <bganne@cisco.com>
-rw-r--r--test/test_gbp.py172
1 files changed, 172 insertions, 0 deletions
diff --git a/test/test_gbp.py b/test/test_gbp.py
index f6bded6bc90..0c9cdb415dd 100644
--- a/test/test_gbp.py
+++ b/test/test_gbp.py
@@ -3848,6 +3848,178 @@ class TestGBP(VppTestCase):
self.assertEqual(rx[IPv6].dst, "2001:10::88")
#
+ # redirect to programmed remote SEP in EPG 320
+ #
+
+ # gbp vxlan tunnel for the remote SEP
+ vx_tun_l3_sep = VppGbpVxlanTunnel(
+ self, 555, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3_sep.add_vpp_config()
+
+ # remote SEP
+ sep5 = VppGbpEndpoint(self, vx_tun_l3_sep,
+ epg_320, None,
+ "12.0.0.10", "13.0.0.10",
+ "4001:10::10", "5001:10::10",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ sep5.add_vpp_config()
+
+ #
+ # redirect from local l3out to remote (known, then unknown) SEP
+ #
+
+ # add local l3out
+ # the external bd
+ self.loop4.set_mac(self.router_mac)
+ VppIpInterfaceBind(self, self.loop4, t4).add_vpp_config()
+ VppIpInterfaceBind(self, self.loop4, t6).add_vpp_config()
+ ebd = VppBridgeDomain(self, 100)
+ ebd.add_vpp_config()
+ gebd = VppGbpBridgeDomain(self, ebd, rd1, self.loop4, None, None)
+ gebd.add_vpp_config()
+ # the external epg
+ eepg = VppGbpEndpointGroup(self, 888, 765, rd1, gebd,
+ None, gebd.bvi,
+ "10.1.0.128",
+ "2001:10:1::128",
+ VppGbpEndpointRetention(2))
+ eepg.add_vpp_config()
+ # add subnets to BVI
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "10.1.0.128",
+ 24).add_vpp_config()
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "2001:10:1::128",
+ 64).add_vpp_config()
+ # ... which are L3-out subnets
+ VppGbpSubnet(self, rd1, "10.1.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ VppGbpSubnet(self, rd1, "2001:10:1::128", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ # external endpoints
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep1 = VppGbpEndpoint(self, self.vlan_100, eepg, None, "10.1.0.1",
+ "11.1.0.1", "2001:10:1::1", "3001:10:1::1",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep1.add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep2 = VppGbpEndpoint(self, self.vlan_101, eepg, None, "10.1.0.2",
+ "11.1.0.2", "2001:10:1::2", "3001:10:1::2",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep2.add_vpp_config()
+
+        # external subnets reachable through eep1 and eep2 respectively
+ VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath(eep1.ip4.address, eep1.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.220.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10:220::", 64,
+ [VppRoutePath(eep1.ip6.address, eep1.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:220::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath(eep2.ip4.address, eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.221.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
+ VppIpRoute(self, "10:221::", 64,
+ [VppRoutePath(eep2.ip6.address, eep2.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:221::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
+
+ # packets from 1 external subnet to the other
+ p = [(Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.17", dst="10.221.0.65") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::17", dst="10:221::65") /
+ UDP(sport=1234, dport=1234) /
+ Raw('\xa5' * 100))]
+
+ # packets should be dropped in absence of contract
+ self.send_and_assert_no_replies(self.pg0, p)
+
+ # contract redirecting to sep5
+ VppGbpContract(
+ self, 402, 4220, 4221, acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip4, sep5.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip6, sep5.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the programmed remote leaf TEP
+ self.assertEqual(rx[VXLAN].vni, 555)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ # remove SEP: it is now an unknown remote SEP and should go
+ # to spine proxy
+ sep5.remove_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the spine proxy TEP
+ self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
# cleanup
#
self.pg7.unconfig_ip4()
clude <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> /** Maplog log file header segment. In a separate file */ typedef struct { u8 maplog_major_version; /**< library major version number */ u8 maplog_minor_version; /**< library minor version number */ u8 maplog_patch_version; /**< library patch version number */ u8 maplog_flag_wrapped; /**< log has wrapped */ u32 application_id; /**< application identifier */ u8 application_major_version; /**< application major version number */ u8 application_minor_version; /**< application minor version number */ u8 application_patch_version; /**< application patch version number */ u8 maplog_flag_circular; /**< log is circular */ u32 record_size_in_cachelines; /**< record size in cache lines */ u32 cacheline_size; /**< cache line size */ u64 file_size_in_records; /**< file size in records */ u64 number_of_records; /**< number of records in entire log */ u64 number_of_files; /**< number of files in entire log */ u8 file_basename[256]; /**< file basename */ } clib_maplog_header_t; #define MAPLOG_MAJOR_VERSION 1 #define MAPLOG_MINOR_VERSION 1 #define MAPLOG_PATCH_VERSION 0 /** Process-private main data structure */ typedef struct { CLIB_CACHE_LINE_ALIGN_MARK (cacheline1); /** rw cache line: atomic ticket-counter, file index */ volatile u64 next_record_index; /** file size in records, rounded to a power of two */ u64 file_size_in_records; u32 log2_file_size_in_records; /**< lg file size in records */ volatile u32 current_file_index; /**< current file index */ volatile u32 flags; /**< flags, currently just "init" or not */ /* read-mostly cache line: size parameters, file names, etc. 
*/ CLIB_CACHE_LINE_ALIGN_MARK (cacheline2); u32 record_size_in_cachelines; /**< record size in cache lines */ /* double-buffered mmap'ed logfiles */ volatile u8 *file_baseva[2]; /**< active segment base addresses */ u8 *filenames[2]; /**< active segment file names */ /* vector not c-string */ u8 *file_basename; /**< basename, e.g. "/tmp/mylog" */ u8 *header_filename; /**< log header file name */ } clib_maplog_main_t; /* flag bits */ #define CLIB_MAPLOG_FLAG_INIT (1<<0) #define CLIB_MAPLOG_FLAG_CIRCULAR (1<<1) #define CLIB_MAPLOG_FLAG_WRAPPED (1<<2) /** log initialization structure */ typedef struct { clib_maplog_main_t *mm; /**< pointer to the main structure */ char *file_basename; /**< file base name */ u64 file_size_in_bytes; /**< file size in bytes */ u32 record_size_in_bytes; /**< record size in bytes */ u32 application_id; /**< application identifier */ u8 application_major_version; /**< application major version number */ u8 application_minor_version; /**< application minor version number */ u8 application_patch_version; /**< application patch version number */ u8 maplog_is_circular; /**< single, circular log */ } clib_maplog_init_args_t; /* function prototypes */ int clib_maplog_init (clib_maplog_init_args_t * ap); void clib_maplog_update_header (clib_maplog_main_t * mm); void clib_maplog_close (clib_maplog_main_t * mm); int clib_maplog_process (char *file_basename, void *fp_arg); format_function_t format_maplog_header; u8 *_clib_maplog_get_entry_slowpath (clib_maplog_main_t * mm, u64 my_record_index); /** * @brief Obtain a log entry pointer * * Increments the atomic ticket counter, and returns a pointer to * the newly-allocated log entry. 
The slowpath function replaces * a full log segment with a new/fresh/empty log segment * * @param[in] mm maplog object pointer * @return pointer to the allocated log entry */ static inline void * clib_maplog_get_entry (clib_maplog_main_t * mm) { u64 my_record_index; u8 *rv; ASSERT (mm->flags & CLIB_MAPLOG_FLAG_INIT); my_record_index = clib_atomic_fetch_add (&mm->next_record_index, 1); /* Time to unmap and create a new logfile? */ if (PREDICT_FALSE ((my_record_index & (mm->file_size_in_records - 1)) == 0)) { /* Regular log? Switch file... */ if (!(mm->flags & CLIB_MAPLOG_FLAG_CIRCULAR)) { /* Yes, but not the very first time... (;-)... */ if (my_record_index) return _clib_maplog_get_entry_slowpath (mm, my_record_index); } else /* Circular log: set the wrap bit and move along */ mm->flags |= CLIB_MAPLOG_FLAG_WRAPPED; /* FALLTHROUGH */ } rv = (u8 *) mm->file_baseva[(my_record_index >> mm->log2_file_size_in_records) & 1] + (my_record_index & (mm->file_size_in_records - 1)) * mm->record_size_in_cachelines * CLIB_CACHE_LINE_BYTES; return rv; } #endif /* __included_maplog_h__ */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */