Diffstat (limited to 'test')
-rw-r--r--  test/Makefile  41
-rw-r--r--  test/bfd.py  423
-rw-r--r--  test/doc/Makefile  5
-rw-r--r--  test/doc/conf.py  13
-rw-r--r--  test/test_abf.py  330
-rw-r--r--  test/test_acl_plugin.py  1438
-rw-r--r--  test/test_acl_plugin_conns.py  405
-rw-r--r--  test/test_acl_plugin_l2l3.py  864
-rw-r--r--  test/test_acl_plugin_macip.py  1278
-rw-r--r--  test/test_adl.py  103
-rw-r--r--  test/test_arping.py  251
-rw-r--r--  test/test_bfd.py  2763
-rw-r--r--  test/test_bier.py  862
-rw-r--r--  test/test_bihash.py  74
-rw-r--r--  test/test_bond.py  321
-rw-r--r--  test/test_buffers.py  29
-rw-r--r--  test/test_cdp.py  155
-rw-r--r--  test/test_classifier.py  569
-rw-r--r--  test/test_classifier_ip6.py  490
-rw-r--r--  test/test_classify_l2_acl.py  608
-rw-r--r--  test/test_cli.py  88
-rw-r--r--  test/test_cnat.py  975
-rw-r--r--  test/test_counters.py  39
-rw-r--r--  test/test_crypto.py  28
-rw-r--r--  test/test_det44.py  682
-rw-r--r--  test/test_dhcp.py  1686
-rw-r--r--  test/test_dhcp6.py  805
-rw-r--r--  test/test_dns.py  109
-rw-r--r--  test/test_dslite.py  341
-rw-r--r--  test/test_dvr.py  410
-rw-r--r--  test/test_endian.py  38
-rw-r--r--  test/test_fib.py  48
-rw-r--r--  test/test_flowprobe.py  1094
-rw-r--r--  test/test_gbp.py  5926
-rw-r--r--  test/test_geneve.py  307
-rw-r--r--  test/test_gre.py  1296
-rw-r--r--  test/test_gro.py  142
-rw-r--r--  test/test_gso.py  722
-rw-r--r--  test/test_gtpu.py  413
-rw-r--r--  test/test_igmp.py  837
-rw-r--r--  test/test_ikev2.py  2059
-rw-r--r--  test/test_ipsec_nat.py  271
-rw-r--r--  test/test_l2tp.py  49
-rw-r--r--  test/test_l3xc.py  152
-rw-r--r--  test/test_lacp.py  364
-rw-r--r--  test/test_lb.py  502
-rw-r--r--  test/test_lb_api.py  76
-rw-r--r--  test/test_linux_cp.py  174
-rw-r--r--  test/test_lisp.py  219
-rw-r--r--  test/test_mactime.py  160
-rw-r--r--  test/test_map.py  964
-rw-r--r--  test/test_map_br.py  694
-rw-r--r--  test/test_memif.py  308
-rw-r--r--  test/test_mss_clamp.py  295
-rw-r--r--  test/test_nat44_ed.py  3662
-rw-r--r--  test/test_nat44_ei.py  4280
-rw-r--r--  test/test_nat64.py  1937
-rw-r--r--  test/test_nat66.py  179
-rw-r--r--  test/test_ping.py  176
-rw-r--r--  test/test_pnat.py  203
-rw-r--r--  test/test_policer.py  117
-rw-r--r--  test/test_policer_input.py  146
-rw-r--r--  test/test_pppoe.py  611
-rw-r--r--  test/test_quic.py  554
-rw-r--r--  test/test_srv6.py  2147
-rw-r--r--  test/test_srv6_ad.py  809
-rw-r--r--  test/test_srv6_ad_flow.py  637
-rwxr-xr-x  test/test_srv6_as.py  887
-rw-r--r--  test/test_srv6_mobile.py  340
-rw-r--r--  test/test_svs.py  342
-rw-r--r--  test/test_urpf.py  305
-rw-r--r--  test/test_vapi.py  80
-rw-r--r--  test/test_vpe_api.py  55
-rw-r--r--  test/test_vppinfra.py  40
-rw-r--r--  test/test_vrrp.py  1293
-rw-r--r--  test/test_vxlan.py  421
-rw-r--r--  test/test_vxlan6.py  316
-rw-r--r--  test/test_vxlan_gbp.py  293
-rw-r--r--  test/test_vxlan_gpe.py  265
-rwxr-xr-x  test/test_wireguard.py  748
-rw-r--r--  test/vpp_acl.py  476
-rw-r--r--  test/vpp_bier.py  293
-rw-r--r--  test/vpp_bond_interface.py  52
-rw-r--r--  test/vpp_dhcp.py  131
-rw-r--r--  test/vpp_igmp.py  75
-rw-r--r--  test/vpp_ikev2.py  179
-rw-r--r--  test/vpp_lb.py  84
-rw-r--r--  test/vpp_memif.py  140
-rw-r--r--  test/vpp_pppoe_interface.py  42
-rw-r--r--  test/vpp_srv6.py  198
-rw-r--r--  test/vpp_vxlan_gbp_tunnel.py  75
-rw-r--r--  test/vpp_vxlan_tunnel.py  87
92 files changed, 55932 insertions, 38 deletions
diff --git a/test/Makefile b/test/Makefile
index 0ee61a23c0b..82095883b55 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -13,14 +13,10 @@ ifndef TEST_DIR
$(error TEST_DIR is not set)
endif
-export TEST_BR = $(BR)/build-test
-export TEST_DOC_BR = $(TEST_BR)/doc
-export BUILD_TEST_SRC = $(TEST_BR)/src
+export TEST_BR = $(TEST_DIR)
+export TEST_DOC_BR = $(TEST_DIR)/doc/build
FAILED_DIR=/tmp/vpp-failed-unittests/
-PLUGIN_TEST_DIRS=$(shell find $(PLUGIN_SRC_DIR) -type d -name test -exec echo -n " -d {}" \;)
-CORE_TEST_DIRS=$(shell find $(WS_ROOT)/src -not \( -path $(INTERN_PLUGIN_SRC_DIR) -prune \) -type d -name test -exec echo -n " -d {}" \;)
-VPP_TEST_DIRS=$(shell ls -d $(TEST_DIR)$(PLUGIN_TEST_DIRS)$(CORE_TEST_DIRS) $(EXTERN_TESTS))
-VPP_TEST_SRC=$(shell for dir in $(VPP_TEST_DIRS) ; do ls $$dir/*.py 2>/dev/null; done)
+VPP_TEST_DIRS=$(shell ls -d $(TEST_DIR) $(EXTERN_TESTS))
FORCE_NO_WIPE=0
ifeq ($(DEBUG),gdb)
@@ -70,7 +66,7 @@ ifneq ($(EXTERN_TESTS),)
UNITTEST_EXTRA_OPTS=$(UNITTEST_FAILFAST_OPTS) -d $(EXTERN_TESTS)
endif
-VENV_PATH=$(TEST_BR)/venv
+VENV_PATH=$(TEST_DIR)/venv
ifeq ($(TEST_DEBUG),1)
VENV_RUN_DIR:=$(VENV_PATH)/run-debug
@@ -84,11 +80,6 @@ else
PYTHON_INTERP=$(PYTHON)
endif
-empty:=
-space:= $(empty) $(empty)
-export PYTHONPATH=$(subst $(space),:,$(VPP_TEST_DIRS))
-export PYTHONPYCACHEPREFIX=$(TEST_BR)/pycache
-
PYTHON_VERSION=$(shell $(PYTHON_INTERP) -c 'import sys; print(sys.version_info.major)')
PIP_VERSION=20.1.1
# Keep in sync with requirements.txt
@@ -158,7 +149,7 @@ PLUGIN_SRC_DIR=$(INTERN_PLUGIN_SRC_DIR)
endif
define retest-func
-@env VPP_IN_GDB=$(VPP_IN_GDB) FORCE_FOREGROUND=$(FORCE_FOREGROUND) FAILED_DIR=$(FAILED_DIR) VENV_PATH=$(VENV_PATH) scripts/setsid_wrapper.sh $(FORCE_FOREGROUND) $(VENV_PATH)/bin/activate $(PYTHON_INTERP) $(PYTHON_PROFILE_OPTS) $(BUILD_TEST_SRC)/run_tests.py -d $(BUILD_TEST_SRC) $(UNITTEST_EXTRA_OPTS) || env FAILED_DIR=$(FAILED_DIR) COMPRESS_FAILED_TEST_LOGS=$(COMPRESS_FAILED_TEST_LOGS) scripts/compress_failed.sh
+@env VPP_IN_GDB=$(VPP_IN_GDB) FORCE_FOREGROUND=$(FORCE_FOREGROUND) FAILED_DIR=$(FAILED_DIR) VENV_PATH=$(VENV_PATH) scripts/setsid_wrapper.sh $(FORCE_FOREGROUND) $(VENV_PATH)/bin/activate $(PYTHON_INTERP) $(PYTHON_PROFILE_OPTS) run_tests.py -d $(TEST_DIR) $(UNITTEST_EXTRA_OPTS) || env FAILED_DIR=$(FAILED_DIR) COMPRESS_FAILED_TEST_LOGS=$(COMPRESS_FAILED_TEST_LOGS) scripts/compress_failed.sh
endef
.PHONY: sanity
@@ -167,8 +158,8 @@ ifeq ($(SANITY),no)
SANITY_IMPORT_VPP_PAPI_CMD=true
SANITY_RUN_VPP_CMD=true
else
-SANITY_IMPORT_VPP_PAPI_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) $(BUILD_TEST_SRC)/sanity_import_vpp_papi.py
-SANITY_RUN_VPP_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) $(BUILD_TEST_SRC)/sanity_run_vpp.py
+SANITY_IMPORT_VPP_PAPI_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) sanity_import_vpp_papi.py
+SANITY_RUN_VPP_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) sanity_run_vpp.py
endif
ifndef TEST_JOBS
@@ -207,16 +198,11 @@ sanity: test-dep
ext-test-apps:
make -C ext test-apps
-$(BUILD_TEST_SRC): verify-env
- @rm -rf $@
- @mkdir -p $@
- @for file in $(VPP_TEST_SRC); do if [ ! -L $$file ] && [ ! -e $(BUILD_TEST_SRC)/$$(basename $$file) ] ; then ln -s $$file $(BUILD_TEST_SRC) ; fi ; done
-
$(FAILED_DIR): reset
@mkdir -p $@
.PHONY: test-dep
-test-dep: $(BUILD_TEST_SRC) $(PAPI_INSTALL_DONE) $(FAILED_DIR)
+test-dep: $(PAPI_INSTALL_DONE) $(FAILED_DIR)
.PHONY: test
test: test-dep ext-test-apps sanity
@@ -229,8 +215,6 @@ retest: verify-env sanity $(FAILED_DIR)
.PHONY: shell
shell: test-dep
@echo "source $(VENV_PATH)/bin/activate;\
- cd $(BUILD_TEST_SRC);\
- export PYTHONPATH=$(PYTHONPATH);\
export RND_SEED=$(RND_SEED);\
echo '***';\
echo PYTHONPATH=$(PYTHONPATH);\
@@ -259,14 +243,13 @@ wipe: reset
@make -C ext clean
@rm -rf $(VENV_PATH)
@rm -rf $(patsubst %,%/__pycache__, $(VPP_TEST_DIRS))
- @rm -rf $(BUILD_TEST_SRC)
$(TEST_DOC_BR): $(PIP_INSTALL_DONE)
@mkdir -p $@
@bash -c "source $(VENV_PATH)/bin/activate && make -C doc html"
.PHONY: doc
-doc: $(BUILD_TEST_SRC) $(PIP_PATCH_DONE) $(TEST_DOC_BR)
+doc: $(PIP_PATCH_DONE) $(TEST_DOC_BR)
@echo
@echo "Test Documentation URL: $(TEST_DOC_BR)/html/index.html"
@echo "Run 'make test-wipe-doc test-doc' to rebuild the test docs"
@@ -305,7 +288,7 @@ wipe-all: wipe wipe-papi wipe-doc wipe-cov
@rm -rf $(TEST_BR)
.PHONY: checkstyle-diff
-checkstyle-diff: $(BUILD_TEST_SRC) $(PIP_INSTALL_DONE)
+checkstyle-diff: $(PIP_INSTALL_DONE)
@bash -c "source $(VENV_PATH)/bin/activate &&\
$(PYTHON_INTERP) -m pip install pycodestyle"
@bash -c "source $(VENV_PATH)/bin/activate &&\
@@ -326,11 +309,11 @@ start-gdb: sanity
$(call retest-func)
.PHONY: checkstyle
-checkstyle: $(BUILD_TEST_SRC) $(PIP_INSTALL_DONE)
+checkstyle: $(PIP_INSTALL_DONE)
@bash -c "source $(VENV_PATH)/bin/activate &&\
$(PYTHON_INTERP) -m pip install pycodestyle"
@bash -c "source $(VENV_PATH)/bin/activate &&\
- pycodestyle --show-source --ignore=W504,E126,E241,E226,E305,E704,E741,E722 -v $(BUILD_TEST_SRC)/*.py ||\
+ pycodestyle --show-source --ignore=W504,E126,E241,E226,E305,E704,E741,E722 -v *.py ||\
(echo \"*******************************************************************\" &&\
echo \"* Test framework PEP8 compliance check FAILED (checked all files)\" &&\
echo \"*******************************************************************\" &&\
diff --git a/test/bfd.py b/test/bfd.py
new file mode 100644
index 00000000000..9d44425ec9f
--- /dev/null
+++ b/test/bfd.py
@@ -0,0 +1,423 @@
+""" BFD protocol implementation """
+
+from random import randint
+from socket import AF_INET, AF_INET6, inet_pton
+from scapy.all import bind_layers
+from scapy.layers.inet import UDP
+from scapy.packet import Packet
+from scapy.fields import BitField, BitEnumField, XByteField, FlagsField,\
+ ConditionalField, StrField
+from vpp_object import VppObject
+from util import NumericConstant
+from vpp_papi import VppEnum
+
+
+class BFDDiagCode(NumericConstant):
+ """ BFD Diagnostic Code """
+ no_diagnostic = 0
+ control_detection_time_expired = 1
+ echo_function_failed = 2
+ neighbor_signaled_session_down = 3
+ forwarding_plane_reset = 4
+ path_down = 5
+ concatenated_path_down = 6
+ administratively_down = 7
+ reverse_concatenated_path_down = 8
+
+ desc_dict = {
+ no_diagnostic: "No diagnostic",
+ control_detection_time_expired: "Control Detection Time Expired",
+ echo_function_failed: "Echo Function Failed",
+ neighbor_signaled_session_down: "Neighbor Signaled Session Down",
+ forwarding_plane_reset: "Forwarding Plane Reset",
+ path_down: "Path Down",
+ concatenated_path_down: "Concatenated Path Down",
+ administratively_down: "Administratively Down",
+ reverse_concatenated_path_down: "Reverse Concatenated Path Down",
+ }
+
+
+class BFDState(NumericConstant):
+ """ BFD State """
+ admin_down = 0
+ down = 1
+ init = 2
+ up = 3
+
+ desc_dict = {
+ admin_down: "AdminDown",
+ down: "Down",
+ init: "Init",
+ up: "Up",
+ }
+
+
+class BFDAuthType(NumericConstant):
+ """ BFD Authentication Type """
+ no_auth = 0
+ simple_pwd = 1
+ keyed_md5 = 2
+ meticulous_keyed_md5 = 3
+ keyed_sha1 = 4
+ meticulous_keyed_sha1 = 5
+
+ desc_dict = {
+ no_auth: "No authentication",
+ simple_pwd: "Simple Password",
+ keyed_md5: "Keyed MD5",
+ meticulous_keyed_md5: "Meticulous Keyed MD5",
+ keyed_sha1: "Keyed SHA1",
+ meticulous_keyed_sha1: "Meticulous Keyed SHA1",
+ }
+
+
+def bfd_is_auth_used(pkt):
+ """ is packet authenticated? """
+ return "A" in pkt.sprintf("%BFD.flags%")
+
+
+def bfd_is_simple_pwd_used(pkt):
+ """ is simple password authentication used? """
+ return bfd_is_auth_used(pkt) and pkt.auth_type == BFDAuthType.simple_pwd
+
+
+def bfd_is_sha1_used(pkt):
+ """ is sha1 authentication used? """
+ return bfd_is_auth_used(pkt) and pkt.auth_type in \
+ (BFDAuthType.keyed_sha1, BFDAuthType.meticulous_keyed_sha1)
+
+
+def bfd_is_md5_used(pkt):
+ """ is md5 authentication used? """
+ return bfd_is_auth_used(pkt) and pkt.auth_type in \
+ (BFDAuthType.keyed_md5, BFDAuthType.meticulous_keyed_md5)
+
+
+def bfd_is_md5_or_sha1_used(pkt):
+ """ is md5 or sha1 used? """
+ return bfd_is_md5_used(pkt) or bfd_is_sha1_used(pkt)
+
+
+class BFD(Packet):
+ """ BFD protocol layer for scapy """
+
+ udp_dport = 3784 #: BFD destination port per RFC 5881
+ udp_dport_echo = 3785 # : BFD destination port for ECHO per RFC 5881
+ udp_sport_min = 49152 #: BFD source port min value per RFC 5881
+ udp_sport_max = 65535 #: BFD source port max value per RFC 5881
+ bfd_pkt_len = 24 # : length of BFD pkt without authentication section
+ sha1_auth_len = 28 # : length of authentication section if SHA1 used
+
+ name = "BFD"
+
+ fields_desc = [
+ BitField("version", 1, 3),
+ BitEnumField("diag", 0, 5, BFDDiagCode.desc_dict),
+ BitEnumField("state", 0, 2, BFDState.desc_dict),
+ FlagsField("flags", 0, 6, ['M', 'D', 'A', 'C', 'F', 'P']),
+ XByteField("detect_mult", 0),
+ BitField("length", bfd_pkt_len, 8),
+ BitField("my_discriminator", 0, 32),
+ BitField("your_discriminator", 0, 32),
+ BitField("desired_min_tx_interval", 0, 32),
+ BitField("required_min_rx_interval", 0, 32),
+ BitField("required_min_echo_rx_interval", 0, 32),
+ ConditionalField(
+ BitEnumField("auth_type", 0, 8, BFDAuthType.desc_dict),
+ bfd_is_auth_used),
+ ConditionalField(BitField("auth_len", 0, 8), bfd_is_auth_used),
+ ConditionalField(BitField("auth_key_id", 0, 8), bfd_is_auth_used),
+ ConditionalField(BitField("auth_reserved", 0, 8),
+ bfd_is_md5_or_sha1_used),
+ ConditionalField(
+ BitField("auth_seq_num", 0, 32), bfd_is_md5_or_sha1_used),
+ ConditionalField(StrField("auth_key_hash", "0" * 16), bfd_is_md5_used),
+ ConditionalField(
+ StrField("auth_key_hash", "0" * 20), bfd_is_sha1_used),
+ ]
+
+ def mysummary(self):
+ return self.sprintf("BFD(my_disc=%BFD.my_discriminator%,"
+ "your_disc=%BFD.your_discriminator%)")
+
+
+# glue the BFD packet class to scapy parser
+bind_layers(UDP, BFD, dport=BFD.udp_dport)
+
+
+class BFD_vpp_echo(Packet):
+ """ BFD echo packet as used by VPP (non-rfc, as rfc doesn't define one) """
+
+ udp_dport = 3785 #: BFD echo destination port per RFC 5881
+ name = "BFD_VPP_ECHO"
+
+ fields_desc = [
+ BitField("discriminator", 0, 32),
+ BitField("expire_time_clocks", 0, 64),
+ BitField("checksum", 0, 64)
+ ]
+
+ def mysummary(self):
+ return self.sprintf(
+ "BFD_VPP_ECHO(disc=%BFD_VPP_ECHO.discriminator%,"
+ "expire_time_clocks=%BFD_VPP_ECHO.expire_time_clocks%)")
+
+
+# glue the BFD echo packet class to scapy parser
+bind_layers(UDP, BFD_vpp_echo, dport=BFD_vpp_echo.udp_dport)
+
+
+class VppBFDAuthKey(VppObject):
+ """ Represents BFD authentication key in VPP """
+
+ def __init__(self, test, conf_key_id, auth_type, key):
+ self._test = test
+ self._key = key
+ self._auth_type = auth_type
+ test.assertIn(auth_type, BFDAuthType.desc_dict)
+ self._conf_key_id = conf_key_id
+
+ @property
+ def test(self):
+ """ Test which created this key """
+ return self._test
+
+ @property
+ def auth_type(self):
+ """ Authentication type for this key """
+ return self._auth_type
+
+ @property
+ def key(self):
+ """ key data """
+ return self._key
+
+ @key.setter
+ def key(self, value):
+ self._key = value
+
+ @property
+ def conf_key_id(self):
+ """ configuration key ID """
+ return self._conf_key_id
+
+ def add_vpp_config(self):
+ self.test.vapi.bfd_auth_set_key(
+ conf_key_id=self._conf_key_id, auth_type=self._auth_type,
+ key=self._key, key_len=len(self._key))
+ self._test.registry.register(self, self.test.logger)
+
+ def get_bfd_auth_keys_dump_entry(self):
+ """ get the entry in the auth keys dump corresponding to this key """
+ result = self.test.vapi.bfd_auth_keys_dump()
+ for k in result:
+ if k.conf_key_id == self._conf_key_id:
+ return k
+ return None
+
+ def query_vpp_config(self):
+ return self.get_bfd_auth_keys_dump_entry() is not None
+
+ def remove_vpp_config(self):
+ self.test.vapi.bfd_auth_del_key(conf_key_id=self._conf_key_id)
+
+ def object_id(self):
+ return "bfd-auth-key-%s" % self._conf_key_id
+
+
+class VppBFDUDPSession(VppObject):
+ """ Represents BFD UDP session in VPP """
+
+ def __init__(self, test, interface, peer_addr, local_addr=None, af=AF_INET,
+ desired_min_tx=300000, required_min_rx=300000, detect_mult=3,
+ sha1_key=None, bfd_key_id=None, is_tunnel=False):
+ self._test = test
+ self._interface = interface
+ self._af = af
+ if local_addr:
+ self._local_addr = local_addr
+ else:
+ self._local_addr = None
+ self._peer_addr = peer_addr
+ self._desired_min_tx = desired_min_tx
+ self._required_min_rx = required_min_rx
+ self._detect_mult = detect_mult
+ self._sha1_key = sha1_key
+ if bfd_key_id is not None:
+ self._bfd_key_id = bfd_key_id
+ else:
+ self._bfd_key_id = randint(0, 255)
+ self._is_tunnel = is_tunnel
+
+ @property
+ def test(self):
+ """ Test which created this session """
+ return self._test
+
+ @property
+ def interface(self):
+ """ Interface on which this session lives """
+ return self._interface
+
+ @property
+ def af(self):
+ """ Address family - AF_INET or AF_INET6 """
+ return self._af
+
+ @property
+ def local_addr(self):
+ """ BFD session local address (VPP address) """
+ if self._local_addr is None:
+ if self.af == AF_INET:
+ return self._interface.local_ip4
+ elif self.af == AF_INET6:
+ return self._interface.local_ip6
+ else:
+ raise Exception("Unexpected af '%s'" % self.af)
+ return self._local_addr
+
+ @property
+ def peer_addr(self):
+ """ BFD session peer address """
+ return self._peer_addr
+
+ def get_bfd_udp_session_dump_entry(self):
+ """ get the namedtuple entry from bfd udp session dump """
+ result = self.test.vapi.bfd_udp_session_dump()
+ for s in result:
+ self.test.logger.debug("session entry: %s" % str(s))
+ if s.sw_if_index == self.interface.sw_if_index:
+ if self.af == AF_INET \
+ and self.interface.local_ip4 == str(s.local_addr) \
+ and self.interface.remote_ip4 == str(s.peer_addr):
+ return s
+ if self.af == AF_INET6 \
+ and self.interface.local_ip6 == str(s.local_addr) \
+ and self.interface.remote_ip6 == str(s.peer_addr):
+ return s
+ return None
+
+ @property
+ def state(self):
+ """ BFD session state """
+ session = self.get_bfd_udp_session_dump_entry()
+ if session is None:
+ raise Exception("Could not find BFD session in VPP response")
+ return session.state
+
+ @property
+ def desired_min_tx(self):
+ """ desired minimum tx interval """
+ return self._desired_min_tx
+
+ @property
+ def required_min_rx(self):
+ """ required minimum rx interval """
+ return self._required_min_rx
+
+ @property
+ def detect_mult(self):
+ """ detect multiplier """
+ return self._detect_mult
+
+ @property
+ def sha1_key(self):
+ """ sha1 key """
+ return self._sha1_key
+
+ @property
+ def bfd_key_id(self):
+ """ bfd key id in use """
+ return self._bfd_key_id
+
+ @property
+ def is_tunnel(self):
+ return self._is_tunnel
+
+ def activate_auth(self, key, bfd_key_id=None, delayed=False):
+ """ activate authentication for this session """
+ self._bfd_key_id = bfd_key_id if bfd_key_id else randint(0, 255)
+ self._sha1_key = key
+ conf_key_id = self._sha1_key.conf_key_id
+ is_delayed = 1 if delayed else 0
+ self.test.vapi.bfd_udp_auth_activate(
+ sw_if_index=self._interface.sw_if_index,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr,
+ bfd_key_id=self._bfd_key_id,
+ conf_key_id=conf_key_id,
+ is_delayed=is_delayed)
+
+ def deactivate_auth(self, delayed=False):
+ """ deactivate authentication """
+ self._bfd_key_id = None
+ self._sha1_key = None
+ is_delayed = 1 if delayed else 0
+ self.test.vapi.bfd_udp_auth_deactivate(
+ sw_if_index=self._interface.sw_if_index,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr,
+ is_delayed=is_delayed)
+
+ def modify_parameters(self,
+ detect_mult=None,
+ desired_min_tx=None,
+ required_min_rx=None):
+ """ modify session parameters """
+ if detect_mult:
+ self._detect_mult = detect_mult
+ if desired_min_tx:
+ self._desired_min_tx = desired_min_tx
+ if required_min_rx:
+ self._required_min_rx = required_min_rx
+ self.test.vapi.bfd_udp_mod(sw_if_index=self._interface.sw_if_index,
+ desired_min_tx=self.desired_min_tx,
+ required_min_rx=self.required_min_rx,
+ detect_mult=self.detect_mult,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr)
+
+ def add_vpp_config(self):
+ bfd_key_id = self._bfd_key_id if self._sha1_key else None
+ conf_key_id = self._sha1_key.conf_key_id if self._sha1_key else None
+ is_authenticated = True if self._sha1_key else False
+ self.test.vapi.bfd_udp_add(sw_if_index=self._interface.sw_if_index,
+ desired_min_tx=self.desired_min_tx,
+ required_min_rx=self.required_min_rx,
+ detect_mult=self.detect_mult,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr,
+ bfd_key_id=bfd_key_id,
+ conf_key_id=conf_key_id,
+ is_authenticated=is_authenticated)
+ self._test.registry.register(self, self.test.logger)
+
+ def query_vpp_config(self):
+ session = self.get_bfd_udp_session_dump_entry()
+ return session is not None
+
+ def remove_vpp_config(self):
+ self.test.vapi.bfd_udp_del(self._interface.sw_if_index,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr)
+
+ def object_id(self):
+ return "bfd-udp-%s-%s-%s-%s" % (self._interface.sw_if_index,
+ self.local_addr,
+ self.peer_addr,
+ self.af)
+
+ def admin_up(self):
+ """ set bfd session admin-up """
+ self.test.vapi.bfd_udp_session_set_flags(
+ flags=VppEnum.vl_api_if_status_flags_t.IF_STATUS_API_FLAG_ADMIN_UP,
+ sw_if_index=self._interface.sw_if_index,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr)
+
+ def admin_down(self):
+ """ set bfd session admin-down """
+ self.test.vapi.bfd_udp_session_set_flags(
+ flags=0, sw_if_index=self._interface.sw_if_index,
+ local_addr=self.local_addr,
+ peer_addr=self.peer_addr)
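
As a quick illustration of the scapy layer defined in test/bfd.py above, the following standalone sketch (not part of the patch; the addresses and timer values are made up, and it assumes the current directory is the test directory so bfd.py imports as "bfd") builds an unauthenticated BFD control packet the same way the BFD tests do and re-parses it, relying on the bind_layers() call to dispatch UDP port 3784 back to the BFD class:

    from scapy.layers.inet import IP, UDP
    from bfd import BFD, BFDState, bfd_is_auth_used

    # build a minimal, unauthenticated BFD control packet
    pkt = (IP(src="192.0.2.1", dst="192.0.2.2") /
           UDP(sport=BFD.udp_sport_min, dport=BFD.udp_dport) /
           BFD(my_discriminator=1, your_discriminator=2,
               state=BFDState.up, detect_mult=3,
               desired_min_tx_interval=300000,
               required_min_rx_interval=300000))

    # serializing and re-parsing dispatches the UDP payload back to BFD
    parsed = IP(bytes(pkt))
    assert parsed[BFD].my_discriminator == 1
    assert parsed[BFD].state == BFDState.up
    assert not bfd_is_auth_used(parsed[BFD])  # no 'A' flag, so no auth fields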
diff --git a/test/doc/Makefile b/test/doc/Makefile
index 608df0a1baf..2d06cedd0be 100644
--- a/test/doc/Makefile
+++ b/test/doc/Makefile
@@ -19,9 +19,6 @@ endif
ifndef TEST_DOC_BR
$(error TEST_DOC_BR is not set)
endif
-ifndef BUILD_TEST_SRC
- $(error BUILD_TEST_SRC is not set)
-endif
ifeq ($(IN_VENV),0)
$(error "Not running inside virtualenv (are you running 'make test-doc' from root?)")
endif
@@ -32,7 +29,7 @@ regen-api-doc: verify-virtualenv
@cp $(SRC_DOC_DIR)/index.rst $(API_DOC_GEN_DIR)
@cp $(SRC_DOC_DIR)/indices.rst $(API_DOC_GEN_DIR)
@cp $(SRC_DOC_DIR)/overview.rst $(API_DOC_GEN_DIR)
- sphinx-apidoc -o $(API_DOC_GEN_DIR) -H "Module documentation" $(BUILD_TEST_SRC)
+ sphinx-apidoc -o $(API_DOC_GEN_DIR) -H "Module documentation" $(TEST_DIR)
.PHONY: html
html: regen-api-doc verify-virtualenv
diff --git a/test/doc/conf.py b/test/doc/conf.py
index f5e974954d8..f73cde27fae 100644
--- a/test/doc/conf.py
+++ b/test/doc/conf.py
@@ -18,7 +18,9 @@
#
import os
import sys
-sys.path.insert(0, os.path.abspath('../../build-root/build-test/src'))
+import subprocess
+from datetime import date
+sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
@@ -59,17 +61,18 @@ master_doc = 'index'
# General information about the project.
project = u'VPP test framework'
-copyright = u'2019, VPP team'
-author = u'VPP team'
+copyright = f'{date.today().year}, FD.io VPP team'
+author = u'FD.io VPP team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = u'20.01'
+output = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE)
+version = f'{output.stdout.decode("utf-8")}'
# The full version, including alpha/beta/rc tags.
-release = u'20.01-rc0'
+release = f'{output.stdout.decode("utf-8")}'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
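
In isolation, the version logic added above amounts to running the in-tree version script and using its stdout for both the Sphinx version and release strings. A standalone sketch of the same idea (assumption: the script prints a single version string followed by a newline; the strip() shown here is not part of the patch):

    import subprocess

    out = subprocess.run(['../../src/scripts/version'], stdout=subprocess.PIPE)
    version = release = out.stdout.decode('utf-8').strip()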
diff --git a/test/test_abf.py b/test/test_abf.py
new file mode 100644
index 00000000000..097476b879a
--- /dev/null
+++ b/test/test_abf.py
@@ -0,0 +1,330 @@
+#!/usr/bin/env python3
+
+from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, \
+ VppIpTable, FibPathProto
+from vpp_acl import AclRule, VppAcl
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from ipaddress import IPv4Network, IPv6Network
+
+from vpp_object import VppObject
+
+NUM_PKTS = 67
+
+
+def find_abf_policy(test, id):
+ policies = test.vapi.abf_policy_dump()
+ for p in policies:
+ if id == p.policy.policy_id:
+ return True
+ return False
+
+
+def find_abf_itf_attach(test, id, sw_if_index):
+ attachs = test.vapi.abf_itf_attach_dump()
+ for a in attachs:
+ if id == a.attach.policy_id and \
+ sw_if_index == a.attach.sw_if_index:
+ return True
+ return False
+
+
+class VppAbfPolicy(VppObject):
+
+ def __init__(self,
+ test,
+ policy_id,
+ acl,
+ paths):
+ self._test = test
+ self.policy_id = policy_id
+ self.acl = acl
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def add_vpp_config(self):
+ self._test.vapi.abf_policy_add_del(
+ 1,
+ {'policy_id': self.policy_id,
+ 'acl_index': self.acl.acl_index,
+ 'n_paths': len(self.paths),
+ 'paths': self.encoded_paths})
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.abf_policy_add_del(
+ 0,
+ {'policy_id': self.policy_id,
+ 'acl_index': self.acl.acl_index,
+ 'n_paths': len(self.paths),
+ 'paths': self.encoded_paths})
+
+ def query_vpp_config(self):
+ return find_abf_policy(self._test, self.policy_id)
+
+ def object_id(self):
+ return ("abf-policy-%d" % self.policy_id)
+
+
+class VppAbfAttach(VppObject):
+
+ def __init__(self,
+ test,
+ policy_id,
+ sw_if_index,
+ priority,
+ is_ipv6=0):
+ self._test = test
+ self.policy_id = policy_id
+ self.sw_if_index = sw_if_index
+ self.priority = priority
+ self.is_ipv6 = is_ipv6
+
+ def add_vpp_config(self):
+ self._test.vapi.abf_itf_attach_add_del(
+ 1,
+ {'policy_id': self.policy_id,
+ 'sw_if_index': self.sw_if_index,
+ 'priority': self.priority,
+ 'is_ipv6': self.is_ipv6})
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.abf_itf_attach_add_del(
+ 0,
+ {'policy_id': self.policy_id,
+ 'sw_if_index': self.sw_if_index,
+ 'priority': self.priority,
+ 'is_ipv6': self.is_ipv6})
+
+ def query_vpp_config(self):
+ return find_abf_itf_attach(self._test,
+ self.policy_id,
+ self.sw_if_index)
+
+ def object_id(self):
+ return ("abf-attach-%d-%d" % (self.policy_id, self.sw_if_index))
+
+
+class TestAbf(VppTestCase):
+ """ ABF Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestAbf, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestAbf, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestAbf, self).setUp()
+
+ self.create_pg_interfaces(range(5))
+
+ for i in self.pg_interfaces[:4]:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestAbf, self).tearDown()
+
+ def test_abf4(self):
+ """ IPv4 ACL Based Forwarding
+ """
+
+ #
+ # We are not testing the various matching capabilities
+        # of ACLs, that's done elsewhere. Here we are testing
+ # the application of ACLs to a forwarding path to achieve
+ # ABF
+ # So we construct just a few ACLs to ensure the ABF policies
+ # are correctly constructed and used. And a few path types
+ # to test the API path decoding.
+ #
+
+ #
+ # Rule 1
+ #
+ rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
+ src_prefix=IPv4Network("1.1.1.1/32"),
+ dst_prefix=IPv4Network("1.1.1.2/32"))
+ acl_1 = VppAcl(self, rules=[rule_1])
+ acl_1.add_vpp_config()
+
+ #
+ # ABF policy for ACL 1 - path via interface 1
+ #
+ abf_1 = VppAbfPolicy(self, 10, acl_1,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ abf_1.add_vpp_config()
+
+ #
+ # Attach the policy to input interface Pg0
+ #
+ attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index, 50)
+ attach_1.add_vpp_config()
+
+ #
+ # fire in packet matching the ACL src,dst. If it's forwarded
+ # then the ABF was successful, since default routing will drop it
+ #
+ p_1 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="1.1.1.1", dst="1.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg1)
+
+ #
+ # Attach a 'better' priority policy to the same interface
+ #
+ abf_2 = VppAbfPolicy(self, 11, acl_1,
+ [VppRoutePath(self.pg2.remote_ip4,
+ self.pg2.sw_if_index)])
+ abf_2.add_vpp_config()
+ attach_2 = VppAbfAttach(self, 11, self.pg0.sw_if_index, 40)
+ attach_2.add_vpp_config()
+
+ self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
+
+ #
+ # Attach a policy with priority in the middle
+ #
+ abf_3 = VppAbfPolicy(self, 12, acl_1,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index)])
+ abf_3.add_vpp_config()
+ attach_3 = VppAbfAttach(self, 12, self.pg0.sw_if_index, 45)
+ attach_3.add_vpp_config()
+
+ self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg2)
+
+ #
+ # remove the best priority
+ #
+ attach_2.remove_vpp_config()
+ self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg3)
+
+ #
+ # Attach one of the same policies to Pg1
+ #
+ attach_4 = VppAbfAttach(self, 12, self.pg1.sw_if_index, 45)
+ attach_4.add_vpp_config()
+
+ p_2 = (Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac) /
+ IP(src="1.1.1.1", dst="1.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg1, p_2 * NUM_PKTS, self.pg3)
+
+ #
+ # detach the policy from PG1, now expect traffic to be dropped
+ #
+ attach_4.remove_vpp_config()
+
+ self.send_and_assert_no_replies(self.pg1, p_2 * NUM_PKTS, "Detached")
+
+ #
+ # Swap to route via a next-hop in the non-default table
+ #
+ table_20 = VppIpTable(self, 20)
+ table_20.add_vpp_config()
+
+ self.pg4.set_table_ip4(table_20.table_id)
+ self.pg4.admin_up()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ abf_13 = VppAbfPolicy(self, 13, acl_1,
+ [VppRoutePath(self.pg4.remote_ip4,
+ 0xffffffff,
+ nh_table_id=table_20.table_id)])
+ abf_13.add_vpp_config()
+ attach_5 = VppAbfAttach(self, 13, self.pg0.sw_if_index, 30)
+ attach_5.add_vpp_config()
+
+ self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg4)
+
+ self.pg4.unconfig_ip4()
+ self.pg4.set_table_ip4(0)
+
+ def test_abf6(self):
+ """ IPv6 ACL Based Forwarding
+ """
+
+ #
+ # Simple test for matching IPv6 packets
+ #
+
+ #
+ # Rule 1
+ #
+ rule_1 = AclRule(is_permit=1, proto=17, ports=1234,
+ src_prefix=IPv6Network("2001::2/128"),
+ dst_prefix=IPv6Network("2001::1/128"))
+ acl_1 = VppAcl(self, rules=[rule_1])
+ acl_1.add_vpp_config()
+
+ #
+ # ABF policy for ACL 1 - path via interface 1
+ #
+ abf_1 = VppAbfPolicy(self, 10, acl_1,
+ [VppRoutePath("3001::1",
+ 0xffffffff)])
+ abf_1.add_vpp_config()
+
+ attach_1 = VppAbfAttach(self, 10, self.pg0.sw_if_index,
+ 45, is_ipv6=True)
+ attach_1.add_vpp_config()
+
+ #
+ # a packet matching the rule
+ #
+ p = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src="2001::2", dst="2001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ #
+ # packets are dropped because there is no route to the policy's
+ # next hop
+ #
+ self.send_and_assert_no_replies(self.pg1, p * NUM_PKTS, "no route")
+
+ #
+ # add a route resolving the next-hop
+ #
+ route = VppIpRoute(self, "3001::1", 32,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ #
+ # now expect packets forwarded.
+ #
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
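
Condensed for orientation only, the core pattern test_abf4 above exercises (this sketch assumes it runs inside the TestAbf case with pg0/pg1 configured as in setUp()): an ACL selects the traffic, an ABF policy binds that ACL to one or more forwarding paths, and an attachment applies the policy to the receive interface, where lower priority values win when several policies are attached:

    rule = AclRule(is_permit=1, proto=17, ports=1234,
                   src_prefix=IPv4Network("1.1.1.1/32"),
                   dst_prefix=IPv4Network("1.1.1.2/32"))
    acl = VppAcl(self, rules=[rule])
    acl.add_vpp_config()

    # policy 10: traffic matching the ACL is forwarded via pg1
    policy = VppAbfPolicy(self, 10, acl,
                          [VppRoutePath(self.pg1.remote_ip4,
                                        self.pg1.sw_if_index)])
    policy.add_vpp_config()

    # attach to the RX interface pg0 with priority 50; another attachment
    # with a numerically lower priority on the same interface would win
    attach = VppAbfAttach(self, 10, self.pg0.sw_if_index, 50)
    attach.add_vpp_config()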
diff --git a/test/test_acl_plugin.py b/test/test_acl_plugin.py
new file mode 100644
index 00000000000..53d96215949
--- /dev/null
+++ b/test/test_acl_plugin.py
@@ -0,0 +1,1438 @@
+#!/usr/bin/env python3
+"""ACL plugin Test Case HLD:
+"""
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest
+from scapy.layers.inet6 import IPv6ExtHdrFragment
+from framework import VppTestCase, VppTestRunner
+from framework import tag_fixme_vpp_workers
+from util import Host, ppp
+from ipaddress import IPv4Network, IPv6Network
+
+from vpp_lo_interface import VppLoInterface
+from vpp_acl import AclRule, VppAcl, VppAclInterface, VppEtypeWhitelist
+from vpp_ip import INVALID_INDEX
+
+
+@tag_fixme_vpp_workers
+class TestACLplugin(VppTestCase):
+ """ ACL plugin Test Case """
+
+ # traffic types
+ IP = 0
+ ICMP = 1
+
+ # IP version
+ IPRANDOM = -1
+ IPV4 = 0
+ IPV6 = 1
+
+ # rule types
+ DENY = 0
+ PERMIT = 1
+
+ # supported protocols
+ proto = [[6, 17], [1, 58]]
+ proto_map = {1: 'ICMP', 58: 'ICMPv6EchoRequest', 6: 'TCP', 17: 'UDP'}
+ ICMPv4 = 0
+ ICMPv6 = 1
+ TCP = 0
+ UDP = 1
+ PROTO_ALL = 0
+
+ # port ranges
+ PORTS_ALL = -1
+ PORTS_RANGE = 0
+ PORTS_RANGE_2 = 1
+ udp_sport_from = 10
+ udp_sport_to = udp_sport_from + 5
+ udp_dport_from = 20000
+ udp_dport_to = udp_dport_from + 5000
+ tcp_sport_from = 30
+ tcp_sport_to = tcp_sport_from + 5
+ tcp_dport_from = 40000
+ tcp_dport_to = tcp_dport_from + 5000
+
+ udp_sport_from_2 = 90
+ udp_sport_to_2 = udp_sport_from_2 + 5
+ udp_dport_from_2 = 30000
+ udp_dport_to_2 = udp_dport_from_2 + 5000
+ tcp_sport_from_2 = 130
+ tcp_sport_to_2 = tcp_sport_from_2 + 5
+ tcp_dport_from_2 = 20000
+ tcp_dport_to_2 = tcp_dport_from_2 + 5000
+
+ icmp4_type = 8 # echo request
+ icmp4_code = 3
+ icmp6_type = 128 # echo request
+ icmp6_code = 3
+
+ icmp4_type_2 = 8
+ icmp4_code_from_2 = 5
+ icmp4_code_to_2 = 20
+ icmp6_type_2 = 128
+ icmp6_code_from_2 = 8
+ icmp6_code_to_2 = 42
+
+ # Test variables
+ bd_id = 1
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(TestACLplugin, cls).setUpClass()
+
+ try:
+ # Create 2 pg interfaces
+ cls.create_pg_interfaces(range(2))
+
+ # Packet flows mapping pg0 -> pg1, pg2 etc.
+ cls.flows = dict()
+ cls.flows[cls.pg0] = [cls.pg1]
+
+ # Packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+
+            # Create BD with MAC learning and unknown unicast flooding enabled
+ # and put interfaces to this BD
+ cls.vapi.bridge_domain_add_del(bd_id=cls.bd_id, uu_flood=1,
+ learn=1)
+ for pg_if in cls.pg_interfaces:
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=pg_if.sw_if_index, bd_id=cls.bd_id)
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ # Mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # Create list of deleted hosts
+ cls.deleted_hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.deleted_hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # warm-up the mac address tables
+ # self.warmup_test()
+ count = 16
+ start = 0
+ n_int = len(cls.pg_interfaces)
+ macs_per_if = count // n_int
+ i = -1
+ for pg_if in cls.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i + start
+ end_nr = count + start if i == (n_int - 1) \
+ else macs_per_if * (i + 1) + start
+ hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(int(start_nr), int(end_nr)):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j),
+ "2017:dead:%02x::%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ except Exception:
+ super(TestACLplugin, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestACLplugin, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestACLplugin, self).setUp()
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestACLplugin, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ cli = "show vlib graph l2-input-feat-arc"
+ self.logger.info(self.vapi.ppcli(cli))
+ cli = "show vlib graph l2-input-feat-arc-end"
+ self.logger.info(self.vapi.ppcli(cli))
+ cli = "show vlib graph l2-output-feat-arc"
+ self.logger.info(self.vapi.ppcli(cli))
+ cli = "show vlib graph l2-output-feat-arc-end"
+ self.logger.info(self.vapi.ppcli(cli))
+ self.logger.info(self.vapi.ppcli("show l2fib verbose"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin acl"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin interface"))
+ self.logger.info(self.vapi.ppcli("show acl-plugin tables"))
+ self.logger.info(self.vapi.ppcli("show bridge-domain %s detail"
+ % self.bd_id))
+
+ def create_rule(self, ip=0, permit_deny=0, ports=PORTS_ALL, proto=-1,
+ s_prefix=0, s_ip=0,
+ d_prefix=0, d_ip=0):
+ if ip:
+ src_prefix = IPv6Network((s_ip, s_prefix))
+ dst_prefix = IPv6Network((d_ip, d_prefix))
+ else:
+ src_prefix = IPv4Network((s_ip, s_prefix))
+ dst_prefix = IPv4Network((d_ip, d_prefix))
+ return AclRule(is_permit=permit_deny, ports=ports, proto=proto,
+ src_prefix=src_prefix, dst_prefix=dst_prefix)
+
+ def apply_rules(self, rules, tag=None):
+ acl = VppAcl(self, rules, tag=tag)
+ acl.add_vpp_config()
+ self.logger.info("Dumped ACL: " + str(acl.dump()))
+        # Apply an ACL on the interface as inbound
+ for i in self.pg_interfaces:
+ acl_if = VppAclInterface(
+ self, sw_if_index=i.sw_if_index, n_input=1, acls=[acl])
+ acl_if.add_vpp_config()
+ return acl.acl_index
+
+ def apply_rules_to(self, rules, tag=None, sw_if_index=INVALID_INDEX):
+ acl = VppAcl(self, rules, tag=tag)
+ acl.add_vpp_config()
+ self.logger.info("Dumped ACL: " + str(acl.dump()))
+        # Apply an ACL on the interface as inbound
+ acl_if = VppAclInterface(self, sw_if_index=sw_if_index, n_input=1,
+ acls=[acl])
+ return acl.acl_index
+
+ def etype_whitelist(self, whitelist, n_input, add=True):
+ # Apply whitelists on all the interfaces
+ if add:
+ self._wl = []
+ for i in self.pg_interfaces:
+ self._wl.append(VppEtypeWhitelist(
+ self, sw_if_index=i.sw_if_index, whitelist=whitelist,
+ n_input=n_input).add_vpp_config())
+ else:
+ if hasattr(self, "_wl"):
+ for wl in self._wl:
+ wl.remove_vpp_config()
+
+ def create_upper_layer(self, packet_index, proto, ports=0):
+ p = self.proto_map[proto]
+ if p == 'UDP':
+ if ports == 0:
+ return UDP(sport=random.randint(self.udp_sport_from,
+ self.udp_sport_to),
+ dport=random.randint(self.udp_dport_from,
+ self.udp_dport_to))
+ else:
+ return UDP(sport=ports, dport=ports)
+ elif p == 'TCP':
+ if ports == 0:
+ return TCP(sport=random.randint(self.tcp_sport_from,
+ self.tcp_sport_to),
+ dport=random.randint(self.tcp_dport_from,
+ self.tcp_dport_to))
+ else:
+ return TCP(sport=ports, dport=ports)
+ return ''
+
+ def create_stream(self, src_if, packet_sizes, traffic_type=0, ipv6=0,
+ proto=-1, ports=0, fragments=False,
+ pkt_raw=True, etype=-1):
+ """
+ Create input packet stream for defined interface using hosts or
+ deleted_hosts list.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :param traffic_type: 1: ICMP packet, 2: IPv6 with EH, 0: otherwise.
+ :return: Stream of packets.
+ """
+ pkts = []
+ if self.flows.__contains__(src_if):
+ src_hosts = self.hosts_by_pg_idx[src_if.sw_if_index]
+ for dst_if in self.flows[src_if]:
+ dst_hosts = self.hosts_by_pg_idx[dst_if.sw_if_index]
+ n_int = len(dst_hosts) * len(src_hosts)
+ for i in range(0, n_int):
+ dst_host = dst_hosts[int(i / len(src_hosts))]
+ src_host = src_hosts[i % len(src_hosts)]
+ pkt_info = self.create_packet_info(src_if, dst_if)
+ if ipv6 == 1:
+ pkt_info.ip = 1
+ elif ipv6 == 0:
+ pkt_info.ip = 0
+ else:
+ pkt_info.ip = random.choice([0, 1])
+ if proto == -1:
+ pkt_info.proto = random.choice(self.proto[self.IP])
+ else:
+ pkt_info.proto = proto
+ payload = self.info_to_payload(pkt_info)
+ p = Ether(dst=dst_host.mac, src=src_host.mac)
+ if etype > 0:
+ p = Ether(dst=dst_host.mac,
+ src=src_host.mac,
+ type=etype)
+ if pkt_info.ip:
+ p /= IPv6(dst=dst_host.ip6, src=src_host.ip6)
+ if fragments:
+ p /= IPv6ExtHdrFragment(offset=64, m=1)
+ else:
+ if fragments:
+ p /= IP(src=src_host.ip4, dst=dst_host.ip4,
+ flags=1, frag=64)
+ else:
+ p /= IP(src=src_host.ip4, dst=dst_host.ip4)
+ if traffic_type == self.ICMP:
+ if pkt_info.ip:
+ p /= ICMPv6EchoRequest(type=self.icmp6_type,
+ code=self.icmp6_code)
+ else:
+ p /= ICMP(type=self.icmp4_type,
+ code=self.icmp4_code)
+ else:
+ p /= self.create_upper_layer(i, pkt_info.proto, ports)
+ if pkt_raw:
+ p /= Raw(payload)
+ pkt_info.data = p.copy()
+ if pkt_raw:
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, pg_if, capture,
+ traffic_type=0, ip_type=0, etype=-1):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ :param traffic_type: 1: ICMP packet, 2: IPv6 with EH, 0: otherwise.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ if etype > 0:
+ if packet[Ether].type != etype:
+ self.logger.error(ppp("Unexpected ethertype in packet:",
+ packet))
+ else:
+ continue
+ try:
+ # Raw data for ICMPv6 are stored in ICMPv6EchoRequest.data
+ if traffic_type == self.ICMP and ip_type == self.IPV6:
+ payload_info = self.payload_to_info(
+ packet[ICMPv6EchoRequest], 'data')
+ payload = packet[ICMPv6EchoRequest]
+ else:
+ payload_info = self.payload_to_info(packet[Raw])
+ payload = packet[self.proto_map[payload_info.proto]]
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ if ip_type != 0:
+ self.assertEqual(payload_info.ip, ip_type)
+ if traffic_type == self.ICMP:
+ try:
+ if payload_info.ip == 0:
+ self.assertEqual(payload.type, self.icmp4_type)
+ self.assertEqual(payload.code, self.icmp4_code)
+ else:
+ self.assertEqual(payload.type, self.icmp6_type)
+ self.assertEqual(payload.code, self.icmp6_code)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+ else:
+ try:
+ ip_version = IPv6 if payload_info.ip == 1 else IP
+
+ ip = packet[ip_version]
+ packet_index = payload_info.index
+
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src,
+ packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[ip_version].src)
+ self.assertEqual(ip.dst, saved_packet[ip_version].dst)
+ p = self.proto_map[payload_info.proto]
+ if p == 'TCP':
+ tcp = packet[TCP]
+ self.assertEqual(tcp.sport, saved_packet[
+ TCP].sport)
+ self.assertEqual(tcp.dport, saved_packet[
+ TCP].dport)
+ elif p == 'UDP':
+ udp = packet[UDP]
+ self.assertEqual(udp.sport, saved_packet[
+ UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[
+ UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:",
+ packet))
+ raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def run_traffic_no_check(self):
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ for i in self.pg_interfaces:
+ if self.flows.__contains__(i):
+ pkts = self.create_stream(i, self.pg_if_packet_sizes)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ def run_verify_test(self, traffic_type=0, ip_type=0, proto=-1, ports=0,
+ frags=False, pkt_raw=True, etype=-1):
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ pkts_cnt = 0
+ for i in self.pg_interfaces:
+ if self.flows.__contains__(i):
+ pkts = self.create_stream(i, self.pg_if_packet_sizes,
+ traffic_type, ip_type, proto, ports,
+ frags, pkt_raw, etype)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+ pkts_cnt += len(pkts)
+
+        # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.logger.info("sent packets count: %d" % pkts_cnt)
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for src_if in self.pg_interfaces:
+ if self.flows.__contains__(src_if):
+ for dst_if in self.flows[src_if]:
+ capture = dst_if.get_capture(pkts_cnt)
+ self.logger.info("Verifying capture on interface %s" %
+ dst_if.name)
+ self.verify_capture(dst_if, capture,
+ traffic_type, ip_type, etype)
+
+ def run_verify_negat_test(self, traffic_type=0, ip_type=0, proto=-1,
+ ports=0, frags=False, etype=-1):
+ # Test
+ pkts_cnt = 0
+ self.reset_packet_infos()
+ for i in self.pg_interfaces:
+ if self.flows.__contains__(i):
+ pkts = self.create_stream(i, self.pg_if_packet_sizes,
+ traffic_type, ip_type, proto, ports,
+ frags, True, etype)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+ pkts_cnt += len(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.logger.info("sent packets count: %d" % pkts_cnt)
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for src_if in self.pg_interfaces:
+ if self.flows.__contains__(src_if):
+ for dst_if in self.flows[src_if]:
+ self.logger.info("Verifying capture on interface %s" %
+ dst_if.name)
+ capture = dst_if.get_capture(0)
+ self.assertEqual(len(capture), 0)
+
+ def test_0000_warmup_test(self):
+ """ ACL plugin version check; learn MACs
+ """
+ reply = self.vapi.papi.acl_plugin_get_version()
+ self.assertEqual(reply.major, 1)
+ self.logger.info("Working with ACL plugin version: %d.%d" % (
+ reply.major, reply.minor))
+ # minor version changes are non breaking
+ # self.assertEqual(reply.minor, 0)
+
+ def test_0001_acl_create(self):
+ """ ACL create/delete test
+ """
+
+ self.logger.info("ACLP_TEST_START_0001")
+ # Create a permit-1234 ACL
+ r = [AclRule(is_permit=1, proto=17, ports=1234, sport_to=1235)]
+ # Test 1: add a new ACL
+ first_acl = VppAcl(self, rules=r, tag="permit 1234")
+ first_acl.add_vpp_config()
+ self.assertTrue(first_acl.query_vpp_config())
+ # The very first ACL gets #0
+ self.assertEqual(first_acl.acl_index, 0)
+ rr = first_acl.dump()
+ self.logger.info("Dumped ACL: " + str(rr))
+ self.assertEqual(len(rr), 1)
+ # We should have the same number of ACL entries as we had asked
+ self.assertEqual(len(rr[0].r), len(r))
+ # The rules should be the same. But because the submitted and returned
+ # are different types, we need to iterate over rules and keys to get
+ # to basic values.
+ for i_rule in range(0, len(r) - 1):
+ encoded_rule = r[i_rule].encode()
+ for rule_key in encoded_rule:
+ self.assertEqual(rr[0].r[i_rule][rule_key],
+ encoded_rule[rule_key])
+
+ # Create a deny-1234 ACL
+ r_deny = [AclRule(is_permit=0, proto=17, ports=1234, sport_to=1235),
+ AclRule(is_permit=1, proto=17, ports=0)]
+ second_acl = VppAcl(self, rules=r_deny, tag="deny 1234;permit all")
+ second_acl.add_vpp_config()
+ self.assertTrue(second_acl.query_vpp_config())
+ # The second ACL gets #1
+ self.assertEqual(second_acl.acl_index, 1)
+
+ # Test 2: try to modify a nonexistent ACL
+ invalid_acl = VppAcl(self, acl_index=432, rules=r, tag="FFFF:FFFF")
+ reply = invalid_acl.add_vpp_config(expect_error=True)
+
+ # apply an ACL on an interface inbound, try to delete ACL, must fail
+ acl_if_list = VppAclInterface(
+ self, sw_if_index=self.pg0.sw_if_index, n_input=1,
+ acls=[first_acl])
+ acl_if_list.add_vpp_config()
+ first_acl.remove_vpp_config(expect_error=True)
+ # Unapply an ACL and then try to delete it - must be ok
+ acl_if_list.remove_vpp_config()
+ first_acl.remove_vpp_config()
+
+        # apply an ACL on an interface outbound, try to delete ACL, must fail
+ acl_if_list = VppAclInterface(
+ self, sw_if_index=self.pg0.sw_if_index, n_input=0,
+ acls=[second_acl])
+ acl_if_list.add_vpp_config()
+ second_acl.remove_vpp_config(expect_error=True)
+ # Unapply an ACL and then try to delete it - must be ok
+ acl_if_list.remove_vpp_config()
+ second_acl.remove_vpp_config()
+
+ # try to apply a nonexistent ACL - must fail
+ acl_if_list = VppAclInterface(
+ self, sw_if_index=self.pg0.sw_if_index, n_input=0,
+ acls=[invalid_acl])
+ acl_if_list.add_vpp_config(expect_error=True)
+
+ self.logger.info("ACLP_TEST_FINISH_0001")
+
+ def test_0002_acl_permit_apply(self):
+ """ permit ACL apply test
+ """
+ self.logger.info("ACLP_TEST_START_0002")
+
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ 0, self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ 0, self.proto[self.IP][self.TCP]))
+
+ # Apply rules
+ acl_idx = self.apply_rules(rules, "permit per-flow")
+
+ # enable counters
+ reply = self.vapi.papi.acl_stats_intf_counters_enable(enable=1)
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ matches = self.statistics.get_counter('/acl/%d/matches' % acl_idx)
+ self.logger.info("stat segment counters: %s" % repr(matches))
+ cli = "show acl-plugin acl"
+ self.logger.info(self.vapi.ppcli(cli))
+ cli = "show acl-plugin tables"
+ self.logger.info(self.vapi.ppcli(cli))
+
+ total_hits = matches[0][0]['packets'] + matches[0][1]['packets']
+ self.assertEqual(total_hits, 64)
+
+ # disable counters
+ reply = self.vapi.papi.acl_stats_intf_counters_enable(enable=0)
+
+ self.logger.info("ACLP_TEST_FINISH_0002")
+
+ def test_0003_acl_deny_apply(self):
+ """ deny ACL apply test
+ """
+ self.logger.info("ACLP_TEST_START_0003")
+ # Add a deny-flows ACL
+ rules = []
+ rules.append(self.create_rule(
+ self.IPV4, self.DENY, self.PORTS_ALL,
+ self.proto[self.IP][self.UDP]))
+ # Permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ acl_idx = self.apply_rules(rules, "deny per-flow;permit all")
+
+ # enable counters
+ reply = self.vapi.papi.acl_stats_intf_counters_enable(enable=1)
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPV4,
+ self.proto[self.IP][self.UDP])
+
+ matches = self.statistics.get_counter('/acl/%d/matches' % acl_idx)
+ self.logger.info("stat segment counters: %s" % repr(matches))
+ cli = "show acl-plugin acl"
+ self.logger.info(self.vapi.ppcli(cli))
+ cli = "show acl-plugin tables"
+ self.logger.info(self.vapi.ppcli(cli))
+ self.assertEqual(matches[0][0]['packets'], 64)
+ # disable counters
+ reply = self.vapi.papi.acl_stats_intf_counters_enable(enable=0)
+ self.logger.info("ACLP_TEST_FINISH_0003")
+ # self.assertEqual(, 0)
+
+ def test_0004_vpp624_permit_icmpv4(self):
+ """ VPP_624 permit ICMPv4
+ """
+ self.logger.info("ACLP_TEST_START_0004")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.ICMP][self.ICMPv4]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit icmpv4")
+
+ # Traffic should still pass
+ self.run_verify_test(self.ICMP, self.IPV4,
+ self.proto[self.ICMP][self.ICMPv4])
+
+ self.logger.info("ACLP_TEST_FINISH_0004")
+
+ def test_0005_vpp624_permit_icmpv6(self):
+ """ VPP_624 permit ICMPv6
+ """
+ self.logger.info("ACLP_TEST_START_0005")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.ICMP][self.ICMPv6]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit icmpv6")
+
+ # Traffic should still pass
+ self.run_verify_test(self.ICMP, self.IPV6,
+ self.proto[self.ICMP][self.ICMPv6])
+
+ self.logger.info("ACLP_TEST_FINISH_0005")
+
+ def test_0006_vpp624_deny_icmpv4(self):
+ """ VPP_624 deny ICMPv4
+ """
+ self.logger.info("ACLP_TEST_START_0006")
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE,
+ self.proto[self.ICMP][self.ICMPv4]))
+ # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny icmpv4")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.ICMP, self.IPV4, 0)
+
+ self.logger.info("ACLP_TEST_FINISH_0006")
+
+ def test_0007_vpp624_deny_icmpv6(self):
+ """ VPP_624 deny ICMPv6
+ """
+ self.logger.info("ACLP_TEST_START_0007")
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE,
+ self.proto[self.ICMP][self.ICMPv6]))
+        # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny icmpv6")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.ICMP, self.IPV6, 0)
+
+ self.logger.info("ACLP_TEST_FINISH_0007")
+
+ def test_0008_tcp_permit_v4(self):
+ """ permit TCPv4
+ """
+ self.logger.info("ACLP_TEST_START_0008")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP])
+
+ self.logger.info("ACLP_TEST_FINISH_0008")
+
+ def test_0009_tcp_permit_v6(self):
+ """ permit TCPv6
+ """
+ self.logger.info("ACLP_TEST_START_0009")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip6 tcp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.TCP])
+
+        self.logger.info("ACLP_TEST_FINISH_0009")
+
+ def test_0010_udp_permit_v4(self):
+ """ permit UDPv4
+ """
+ self.logger.info("ACLP_TEST_START_0010")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+        self.apply_rules(rules, "permit ipv4 udp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0010")
+
+ def test_0011_udp_permit_v6(self):
+ """ permit UDPv6
+ """
+ self.logger.info("ACLP_TEST_START_0011")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip6 udp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0011")
+
+ def test_0012_tcp_deny(self):
+ """ deny TCPv4/v6
+ """
+ self.logger.info("ACLP_TEST_START_0012")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 tcp")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.TCP])
+
+ self.logger.info("ACLP_TEST_FINISH_0012")
+
+ def test_0013_udp_deny(self):
+ """ deny UDPv4/v6
+ """
+ self.logger.info("ACLP_TEST_START_0013")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 udp")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0013")
+
+ def test_0014_acl_dump(self):
+ """ verify add/dump acls
+ """
+ self.logger.info("ACLP_TEST_START_0014")
+
+ r = [[self.IPV4, self.PERMIT, 1234, self.proto[self.IP][self.TCP]],
+ [self.IPV4, self.PERMIT, 2345, self.proto[self.IP][self.UDP]],
+ [self.IPV4, self.PERMIT, 0, self.proto[self.IP][self.TCP]],
+ [self.IPV4, self.PERMIT, 0, self.proto[self.IP][self.UDP]],
+ [self.IPV4, self.PERMIT, 5, self.proto[self.ICMP][self.ICMPv4]],
+ [self.IPV6, self.PERMIT, 4321, self.proto[self.IP][self.TCP]],
+ [self.IPV6, self.PERMIT, 5432, self.proto[self.IP][self.UDP]],
+ [self.IPV6, self.PERMIT, 0, self.proto[self.IP][self.TCP]],
+ [self.IPV6, self.PERMIT, 0, self.proto[self.IP][self.UDP]],
+ [self.IPV6, self.PERMIT, 6, self.proto[self.ICMP][self.ICMPv6]],
+ [self.IPV4, self.DENY, self.PORTS_ALL, 0],
+ [self.IPV4, self.DENY, 1234, self.proto[self.IP][self.TCP]],
+ [self.IPV4, self.DENY, 2345, self.proto[self.IP][self.UDP]],
+ [self.IPV4, self.DENY, 5, self.proto[self.ICMP][self.ICMPv4]],
+ [self.IPV6, self.DENY, 4321, self.proto[self.IP][self.TCP]],
+ [self.IPV6, self.DENY, 5432, self.proto[self.IP][self.UDP]],
+ [self.IPV6, self.DENY, 6, self.proto[self.ICMP][self.ICMPv6]],
+ [self.IPV6, self.DENY, self.PORTS_ALL, 0]
+ ]
+
+ # Add and verify new ACLs
+ rules = []
+ for i in range(len(r)):
+ rules.append(self.create_rule(r[i][0], r[i][1], r[i][2], r[i][3]))
+
+ acl = VppAcl(self, rules=rules)
+ acl.add_vpp_config()
+ result = acl.dump()
+
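+        # Walk the dumped rules in the order they were added: a positive port
+        # value is checked as an exact match, a negative value as the full
+        # 0..65535 range, and zero against the configured per-protocol ranges.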
+ i = 0
+ for drules in result:
+ for dr in drules.r:
+ self.assertEqual(dr.is_permit, r[i][1])
+ self.assertEqual(dr.proto, r[i][3])
+
+ if r[i][2] > 0:
+ self.assertEqual(dr.srcport_or_icmptype_first, r[i][2])
+ else:
+ if r[i][2] < 0:
+ self.assertEqual(dr.srcport_or_icmptype_first, 0)
+ self.assertEqual(dr.srcport_or_icmptype_last, 65535)
+ else:
+ if dr.proto == self.proto[self.IP][self.TCP]:
+ self.assertGreater(dr.srcport_or_icmptype_first,
+ self.tcp_sport_from-1)
+ self.assertLess(dr.srcport_or_icmptype_first,
+ self.tcp_sport_to+1)
+ self.assertGreater(dr.dstport_or_icmpcode_last,
+ self.tcp_dport_from-1)
+ self.assertLess(dr.dstport_or_icmpcode_last,
+ self.tcp_dport_to+1)
+ elif dr.proto == self.proto[self.IP][self.UDP]:
+ self.assertGreater(dr.srcport_or_icmptype_first,
+ self.udp_sport_from-1)
+ self.assertLess(dr.srcport_or_icmptype_first,
+ self.udp_sport_to+1)
+ self.assertGreater(dr.dstport_or_icmpcode_last,
+ self.udp_dport_from-1)
+ self.assertLess(dr.dstport_or_icmpcode_last,
+ self.udp_dport_to+1)
+ i += 1
+
+ self.logger.info("ACLP_TEST_FINISH_0014")
+
+ def test_0015_tcp_permit_port_v4(self):
+ """ permit single TCPv4
+ """
+ self.logger.info("ACLP_TEST_START_0015")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, port,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip4 tcp %d" % port)
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4,
+ self.proto[self.IP][self.TCP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0015")
+
+ def test_0016_udp_permit_port_v4(self):
+ """ permit single UDPv4
+ """
+ self.logger.info("ACLP_TEST_START_0016")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, port,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip4 tcp %d" % port)
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4,
+ self.proto[self.IP][self.UDP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0016")
+
+ def test_0017_tcp_permit_port_v6(self):
+ """ permit single TCPv6
+ """
+ self.logger.info("ACLP_TEST_START_0017")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, port,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip4 tcp %d" % port)
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6,
+ self.proto[self.IP][self.TCP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0017")
+
+ def test_0018_udp_permit_port_v6(self):
+ """ permit single UDPv6
+ """
+ self.logger.info("ACLP_TEST_START_0018")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, port,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip4 tcp %d" % port)
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6,
+ self.proto[self.IP][self.UDP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0018")
+
+    def test_0019_tcp_deny_port(self):
+ """ deny single TCPv4/v6
+ """
+ self.logger.info("ACLP_TEST_START_0019")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, port,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, port,
+ self.proto[self.IP][self.TCP]))
+ # Permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 udp %d" % port)
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.TCP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0019")
+
+ def test_0020_udp_deny_port(self):
+ """ deny single UDPv4/v6
+ """
+ self.logger.info("ACLP_TEST_START_0020")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, port,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, port,
+ self.proto[self.IP][self.UDP]))
+ # Permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 udp %d" % port)
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.UDP], port)
+
+ self.logger.info("ACLP_TEST_FINISH_0020")
+
+ def test_0021_udp_deny_port_verify_fragment_deny(self):
+ """ deny single UDPv4/v6, permit ip any, verify non-initial fragment
+ blocked
+ """
+ self.logger.info("ACLP_TEST_START_0021")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, port,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, port,
+ self.proto[self.IP][self.UDP]))
+        # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 udp %d" % port)
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.UDP], port, True)
+
+ self.logger.info("ACLP_TEST_FINISH_0021")
+
+ def test_0022_zero_length_udp_ipv4(self):
+ """ VPP-687 zero length udp ipv4 packet"""
+ self.logger.info("ACLP_TEST_START_0022")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, port,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(
+ self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit empty udp ip4 %d" % port)
+
+ # Traffic should still pass
+ # Create incoming packet streams for packet-generator interfaces
+ pkts_cnt = 0
+ pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes,
+ self.IP, self.IPV4,
+ self.proto[self.IP][self.UDP], port,
+ False, False)
+ if len(pkts) > 0:
+ self.pg0.add_stream(pkts)
+ pkts_cnt += len(pkts)
+
+        # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg1.get_capture(pkts_cnt)
+
+ self.logger.info("ACLP_TEST_FINISH_0022")
+
+ def test_0023_zero_length_udp_ipv6(self):
+ """ VPP-687 zero length udp ipv6 packet"""
+ self.logger.info("ACLP_TEST_START_0023")
+
+ port = random.randint(16384, 65535)
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, port,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit empty udp ip6 %d" % port)
+
+ # Traffic should still pass
+ # Create incoming packet streams for packet-generator interfaces
+ pkts_cnt = 0
+ pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes,
+ self.IP, self.IPV6,
+ self.proto[self.IP][self.UDP], port,
+ False, False)
+ if len(pkts) > 0:
+ self.pg0.add_stream(pkts)
+ pkts_cnt += len(pkts)
+
+        # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify outgoing packet streams per packet-generator interface
+ self.pg1.get_capture(pkts_cnt)
+
+ self.logger.info("ACLP_TEST_FINISH_0023")
+
+ def test_0108_tcp_permit_v4(self):
+ """ permit TCPv4 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0108")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP])
+
+ self.logger.info("ACLP_TEST_FINISH_0108")
+
+ def test_0109_tcp_permit_v6(self):
+ """ permit TCPv6 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0109")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip6 tcp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.TCP])
+
+ self.logger.info("ACLP_TEST_FINISH_0109")
+
+ def test_0110_udp_permit_v4(self):
+ """ permit UDPv4 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0110")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 udp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0110")
+
+ def test_0111_udp_permit_v6(self):
+ """ permit UDPv6 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0111")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ip6 udp")
+
+ # Traffic should still pass
+ self.run_verify_test(self.IP, self.IPV6, self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0111")
+
+ def test_0112_tcp_deny(self):
+ """ deny TCPv4/v6 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0112")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 tcp")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.TCP])
+
+ self.logger.info("ACLP_TEST_FINISH_0112")
+
+ def test_0113_udp_deny(self):
+ """ deny UDPv4/v6 + non-match range
+ """
+ self.logger.info("ACLP_TEST_START_0113")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_RANGE_2,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_RANGE_2,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ rules.append(self.create_rule(self.IPV6, self.DENY, self.PORTS_RANGE,
+ self.proto[self.IP][self.UDP]))
+ # permit ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.PERMIT,
+ self.PORTS_ALL, 0))
+ rules.append(self.create_rule(self.IPV6, self.PERMIT,
+ self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "deny ip4/ip6 udp")
+
+ # Traffic should not pass
+ self.run_verify_negat_test(self.IP, self.IPRANDOM,
+ self.proto[self.IP][self.UDP])
+
+ self.logger.info("ACLP_TEST_FINISH_0113")
+
+ def test_0300_tcp_permit_v4_etype_aaaa(self):
+ """ permit TCPv4, send 0xAAAA etype
+ """
+ self.logger.info("ACLP_TEST_START_0300")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+
+        # Traffic should still pass, even with an odd ethertype
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP],
+ 0, False, True, 0xaaaa)
+ self.logger.info("ACLP_TEST_FINISH_0300")
+
+ def test_0305_tcp_permit_v4_etype_blacklist_aaaa(self):
+ """ permit TCPv4, whitelist 0x0BBB ethertype, send 0xAAAA-blocked
+ """
+ self.logger.info("ACLP_TEST_START_0305")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+        # whitelist the 0x0bbb ethertype - so 0xaaaa should be blocked
+ self.etype_whitelist([0xbbb], 1)
+
+ # The oddball ethertype should be blocked
+ self.run_verify_negat_test(self.IP, self.IPV4,
+ self.proto[self.IP][self.TCP],
+ 0, False, 0xaaaa)
+
+ # remove the whitelist
+ self.etype_whitelist([], 0, add=False)
+
+ self.logger.info("ACLP_TEST_FINISH_0305")
+
+ def test_0306_tcp_permit_v4_etype_blacklist_aaaa(self):
+ """ permit TCPv4, whitelist 0x0BBB ethertype, send 0x0BBB - pass
+ """
+ self.logger.info("ACLP_TEST_START_0306")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+        # whitelist the 0x0bbb ethertype - so 0xaaaa should be blocked
+ self.etype_whitelist([0xbbb], 1)
+
+        # The whitelisted traffic should pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP],
+ 0, False, True, 0x0bbb)
+
+ # remove the whitelist, the previously blocked 0xAAAA should pass now
+ self.etype_whitelist([], 0, add=False)
+
+ self.logger.info("ACLP_TEST_FINISH_0306")
+
+ def test_0307_tcp_permit_v4_etype_blacklist_aaaa(self):
+ """ permit TCPv4, whitelist 0x0BBB, remove, send 0xAAAA - pass
+ """
+ self.logger.info("ACLP_TEST_START_0307")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # Apply rules
+ self.apply_rules(rules, "permit ipv4 tcp")
+
+        # whitelist the 0x0bbb ethertype - so 0xaaaa should be blocked
+ self.etype_whitelist([0xbbb], 1)
+ # remove the whitelist, the previously blocked 0xAAAA should pass now
+ self.etype_whitelist([], 0, add=False)
+
+        # With the whitelist removed, the previously blocked 0xAAAA should pass
+ self.run_verify_test(self.IP, self.IPV4, self.proto[self.IP][self.TCP],
+ 0, False, True, 0xaaaa)
+
+ self.logger.info("ACLP_TEST_FINISH_0306")
+
+ def test_0315_del_intf(self):
+ """ apply an acl and delete the interface
+ """
+ self.logger.info("ACLP_TEST_START_0315")
+
+ # Add an ACL
+ rules = []
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_RANGE_2,
+ self.proto[self.IP][self.TCP]))
+ rules.append(self.create_rule(self.IPV4, self.PERMIT, self.PORTS_RANGE,
+ self.proto[self.IP][self.TCP]))
+ # deny ip any any in the end
+ rules.append(self.create_rule(self.IPV4, self.DENY, self.PORTS_ALL, 0))
+
+ # create an interface
+ intf = []
+ intf.append(VppLoInterface(self))
+
+ # Apply rules
+ self.apply_rules_to(rules, "permit ipv4 tcp", intf[0].sw_if_index)
+
+ # Remove the interface
+ intf[0].remove_vpp_config()
+
+ self.logger.info("ACLP_TEST_FINISH_0315")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_acl_plugin_conns.py b/test/test_acl_plugin_conns.py
new file mode 100644
index 00000000000..c7941fa150b
--- /dev/null
+++ b/test/test_acl_plugin_conns.py
@@ -0,0 +1,405 @@
+#!/usr/bin/env python3
+""" ACL plugin extended stateful tests """
+
+import unittest
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP, TCP
+from scapy.packet import Packet
+from socket import inet_pton, AF_INET, AF_INET6
+from scapy.layers.inet6 import IPv6, ICMPv6Unknown, ICMPv6EchoRequest
+from scapy.layers.inet6 import ICMPv6EchoReply, IPv6ExtHdrRouting
+from scapy.layers.inet6 import IPv6ExtHdrFragment
+from pprint import pprint
+from random import randint
+from util import L4_Conn
+from ipaddress import ip_network
+
+from vpp_acl import AclRule, VppAcl, VppAclInterface
+
+
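+# Derive a single 5-tuple ACL rule that matches exactly this packet
+# (optionally wildcarding the source port); attached to scapy's Packet below.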
+def to_acl_rule(self, is_permit, wildcard_sport=False):
+ p = self
+ rule_family = AF_INET6 if p.haslayer(IPv6) else AF_INET
+ rule_prefix_len = 128 if p.haslayer(IPv6) else 32
+ rule_l3_layer = IPv6 if p.haslayer(IPv6) else IP
+ rule_l4_sport = p.sport
+ rule_l4_dport = p.dport
+ if p.haslayer(IPv6):
+ rule_l4_proto = p[IPv6].nh
+ else:
+ rule_l4_proto = p[IP].proto
+
+ if wildcard_sport:
+ rule_l4_sport_first = 0
+ rule_l4_sport_last = 65535
+ else:
+ rule_l4_sport_first = rule_l4_sport
+ rule_l4_sport_last = rule_l4_sport
+
+ new_rule = AclRule(is_permit=is_permit, proto=rule_l4_proto,
+ src_prefix=ip_network(
+ (p[rule_l3_layer].src, rule_prefix_len)),
+ dst_prefix=ip_network(
+ (p[rule_l3_layer].dst, rule_prefix_len)),
+ sport_from=rule_l4_sport_first,
+ sport_to=rule_l4_sport_last,
+ dport_from=rule_l4_dport, dport_to=rule_l4_dport)
+
+ return new_rule
+
+
+Packet.to_acl_rule = to_acl_rule
+
+
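+# Iterator helper: yields n_iters times, sleeping sleep_sec between
+# iterations, so connections can age out (or stay alive) during a loop.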
+class IterateWithSleep():
+ def __init__(self, testcase, n_iters, description, sleep_sec):
+ self.curr = 0
+ self.testcase = testcase
+ self.n_iters = n_iters
+ self.sleep_sec = sleep_sec
+ self.description = description
+
+ def __iter__(self):
+ for x in range(0, self.n_iters):
+ yield x
+ self.testcase.sleep(self.sleep_sec)
+
+
+class Conn(L4_Conn):
+ def apply_acls(self, reflect_side, acl_side):
+ pkts = []
+ pkts.append(self.pkt(0))
+ pkts.append(self.pkt(1))
+ pkt = pkts[reflect_side]
+
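+        # is_permit=2 asks the acl-plugin for permit+reflect on this rule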
+ r = []
+ r.append(pkt.to_acl_rule(2, wildcard_sport=True))
+ r.append(self.wildcard_rule(0))
+ reflect_acl = VppAcl(self.testcase, r)
+ reflect_acl.add_vpp_config()
+
+ r = []
+ r.append(self.wildcard_rule(0))
+ deny_acl = VppAcl(self.testcase, r)
+ deny_acl.add_vpp_config()
+
+ if reflect_side == acl_side:
+ acl_if0 = VppAclInterface(self.testcase,
+ self.ifs[acl_side].sw_if_index,
+ [reflect_acl, deny_acl], n_input=1)
+ acl_if1 = VppAclInterface(self.testcase,
+ self.ifs[1-acl_side].sw_if_index, [],
+ n_input=0)
+ acl_if0.add_vpp_config()
+ acl_if1.add_vpp_config()
+ else:
+ acl_if0 = VppAclInterface(self.testcase,
+ self.ifs[acl_side].sw_if_index,
+ [deny_acl, reflect_acl], n_input=1)
+ acl_if1 = VppAclInterface(self.testcase,
+ self.ifs[1-acl_side].sw_if_index, [],
+ n_input=0)
+ acl_if0.add_vpp_config()
+ acl_if1.add_vpp_config()
+
+ def wildcard_rule(self, is_permit):
+ any_addr = ["0.0.0.0", "::"]
+ rule_family = self.address_family
+ is_ip6 = 1 if rule_family == AF_INET6 else 0
+ new_rule = AclRule(is_permit=is_permit, proto=0,
+ src_prefix=ip_network(
+ (any_addr[is_ip6], 0)),
+ dst_prefix=ip_network(
+ (any_addr[is_ip6], 0)),
+ sport_from=0, sport_to=65535, dport_from=0,
+ dport_to=65535)
+ return new_rule
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class ACLPluginConnTestCase(VppTestCase):
+ """ ACL plugin connection-oriented extended testcases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(ACLPluginConnTestCase, cls).setUpClass()
+ # create pg0 and pg1
+ cls.create_pg_interfaces(range(2))
+ cmd = "set acl-plugin session table event-trace 1"
+ cls.logger.info(cls.vapi.cli(cmd))
+ for i in cls.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.resolve_arp()
+ i.resolve_ndp()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ACLPluginConnTestCase, cls).tearDownClass()
+
+ def tearDown(self):
+ """Run standard test teardown and log various show commands
+ """
+ super(ACLPluginConnTestCase, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show ip neighbors"))
+ self.logger.info(self.vapi.cli("show ip6 neighbors"))
+ self.logger.info(self.vapi.cli("show acl-plugin sessions"))
+ self.logger.info(self.vapi.cli("show acl-plugin acl"))
+ self.logger.info(self.vapi.cli("show acl-plugin interface"))
+ self.logger.info(self.vapi.cli("show acl-plugin tables"))
+ self.logger.info(self.vapi.cli("show event-logger all"))
+
+ def run_basic_conn_test(self, af, acl_side):
+ """ Basic conn timeout test """
+ conn1 = Conn(self, self.pg0, self.pg1, af, UDP, 42001, 4242)
+ conn1.apply_acls(0, acl_side)
+ conn1.send_through(0)
+ # the return packets should pass
+ conn1.send_through(1)
+ # send some packets on conn1, ensure it doesn't go away
+ for i in IterateWithSleep(self, 20, "Keep conn active", 0.3):
+ conn1.send_through(1)
+ # allow the conn to time out
+ for i in IterateWithSleep(self, 30, "Wait for timeout", 0.1):
+ pass
+ # now try to send a packet on the reflected side
+ try:
+ p2 = conn1.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ self.assert_equal(p2, None, "packet on long-idle conn")
+
+ def run_active_conn_test(self, af, acl_side):
+ """ Idle connection behind active connection test """
+ base = 10000 + 1000*acl_side
+ conn1 = Conn(self, self.pg0, self.pg1, af, UDP, base + 1, 2323)
+ conn2 = Conn(self, self.pg0, self.pg1, af, UDP, base + 2, 2323)
+ conn3 = Conn(self, self.pg0, self.pg1, af, UDP, base + 3, 2323)
+ conn1.apply_acls(0, acl_side)
+ conn1.send(0)
+ conn1.recv(1)
+ # create and check that the conn2/3 work
+ self.sleep(0.1)
+ conn2.send_pingpong(0)
+ self.sleep(0.1)
+ conn3.send_pingpong(0)
+ # send some packets on conn1, keep conn2/3 idle
+ for i in IterateWithSleep(self, 20, "Keep conn active", 0.2):
+ conn1.send_through(1)
+ try:
+ p2 = conn2.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ # We should have not received the packet on a long-idle
+ # connection, because it should have timed out
+ # If it didn't - it is a problem
+ self.assert_equal(p2, None, "packet on long-idle conn")
+
+ def run_clear_conn_test(self, af, acl_side):
+ """ Clear the connections via CLI """
+ conn1 = Conn(self, self.pg0, self.pg1, af, UDP, 42001, 4242)
+ conn1.apply_acls(0, acl_side)
+ conn1.send_through(0)
+ # the return packets should pass
+ conn1.send_through(1)
+ # send some packets on conn1, ensure it doesn't go away
+ for i in IterateWithSleep(self, 20, "Keep conn active", 0.3):
+ conn1.send_through(1)
+ # clear all connections
+ self.vapi.ppcli("clear acl-plugin sessions")
+ # now try to send a packet on the reflected side
+ try:
+ p2 = conn1.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ self.assert_equal(p2, None, "packet on supposedly deleted conn")
+
+ def run_tcp_transient_setup_conn_test(self, af, acl_side):
+ conn1 = Conn(self, self.pg0, self.pg1, af, TCP, 53001, 5151)
+ conn1.apply_acls(0, acl_side)
+ conn1.send_through(0, 'S')
+ # the return packets should pass
+ conn1.send_through(1, 'SA')
+ # allow the conn to time out
+ for i in IterateWithSleep(self, 30, "Wait for timeout", 0.1):
+ pass
+ # ensure conn times out
+ try:
+ p2 = conn1.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ self.assert_equal(p2, None, "packet on supposedly deleted conn")
+
+ def run_tcp_established_conn_test(self, af, acl_side):
+ conn1 = Conn(self, self.pg0, self.pg1, af, TCP, 53002, 5052)
+ conn1.apply_acls(0, acl_side)
+ conn1.send_through(0, 'S')
+ # the return packets should pass
+ conn1.send_through(1, 'SA')
+ # complete the threeway handshake
+ # (NB: sequence numbers not tracked, so not set!)
+ conn1.send_through(0, 'A')
+ # allow the conn to time out if it's in embryonic timer
+ for i in IterateWithSleep(self, 30, "Wait for transient timeout", 0.1):
+ pass
+ # Try to send the packet from the "forbidden" side - it must pass
+ conn1.send_through(1, 'A')
+ # ensure conn times out for real
+ for i in IterateWithSleep(self, 130, "Wait for timeout", 0.1):
+ pass
+ try:
+ p2 = conn1.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ self.assert_equal(p2, None, "packet on supposedly deleted conn")
+
+ def run_tcp_transient_teardown_conn_test(self, af, acl_side):
+ conn1 = Conn(self, self.pg0, self.pg1, af, TCP, 53002, 5052)
+ conn1.apply_acls(0, acl_side)
+ conn1.send_through(0, 'S')
+ # the return packets should pass
+ conn1.send_through(1, 'SA')
+ # complete the threeway handshake
+ # (NB: sequence numbers not tracked, so not set!)
+ conn1.send_through(0, 'A')
+ # allow the conn to time out if it's in embryonic timer
+ for i in IterateWithSleep(self, 30, "Wait for transient timeout", 0.1):
+ pass
+ # Try to send the packet from the "forbidden" side - it must pass
+ conn1.send_through(1, 'A')
+ # Send the FIN to bounce the session out of established
+ conn1.send_through(1, 'FA')
+ # If conn landed on transient timer it will time out here
+ for i in IterateWithSleep(self, 30, "Wait for transient timeout", 0.1):
+ pass
+ # Now it should have timed out already
+ try:
+ p2 = conn1.send_through(1).command()
+        except Exception:
+ # If we asserted while waiting, it's good.
+ # the conn should have timed out.
+ p2 = None
+ self.assert_equal(p2, None, "packet on supposedly deleted conn")
+
+ def test_0000_conn_prepare_test(self):
+ """ Prepare the settings """
+ self.vapi.ppcli("set acl-plugin session timeout udp idle 1")
+
+ def test_0001_basic_conn_test(self):
+ """ IPv4: Basic conn timeout test reflect on ingress """
+ self.run_basic_conn_test(AF_INET, 0)
+
+ def test_0002_basic_conn_test(self):
+ """ IPv4: Basic conn timeout test reflect on egress """
+ self.run_basic_conn_test(AF_INET, 1)
+
+ def test_0005_clear_conn_test(self):
+ """ IPv4: reflect egress, clear conn """
+ self.run_clear_conn_test(AF_INET, 1)
+
+ def test_0006_clear_conn_test(self):
+ """ IPv4: reflect ingress, clear conn """
+ self.run_clear_conn_test(AF_INET, 0)
+
+ def test_0011_active_conn_test(self):
+ """ IPv4: Idle conn behind active conn, reflect on ingress """
+ self.run_active_conn_test(AF_INET, 0)
+
+ def test_0012_active_conn_test(self):
+ """ IPv4: Idle conn behind active conn, reflect on egress """
+ self.run_active_conn_test(AF_INET, 1)
+
+ def test_1001_basic_conn_test(self):
+ """ IPv6: Basic conn timeout test reflect on ingress """
+ self.run_basic_conn_test(AF_INET6, 0)
+
+ def test_1002_basic_conn_test(self):
+ """ IPv6: Basic conn timeout test reflect on egress """
+ self.run_basic_conn_test(AF_INET6, 1)
+
+ def test_1005_clear_conn_test(self):
+ """ IPv6: reflect egress, clear conn """
+ self.run_clear_conn_test(AF_INET6, 1)
+
+ def test_1006_clear_conn_test(self):
+ """ IPv6: reflect ingress, clear conn """
+ self.run_clear_conn_test(AF_INET6, 0)
+
+ def test_1011_active_conn_test(self):
+ """ IPv6: Idle conn behind active conn, reflect on ingress """
+ self.run_active_conn_test(AF_INET6, 0)
+
+ def test_1012_active_conn_test(self):
+ """ IPv6: Idle conn behind active conn, reflect on egress """
+ self.run_active_conn_test(AF_INET6, 1)
+
+ def test_2000_prepare_for_tcp_test(self):
+ """ Prepare for TCP session tests """
+ # ensure the session hangs on if it gets treated as UDP
+ self.vapi.ppcli("set acl-plugin session timeout udp idle 200")
+        # let the idle TCP connection time out at 10 seconds
+ self.vapi.ppcli("set acl-plugin session timeout tcp idle 10")
+ self.vapi.ppcli("set acl-plugin session timeout tcp transient 1")
+
+ def test_2001_tcp_transient_conn_test(self):
+ """ IPv4: transient TCP session (incomplete 3WHS), ref. on ingress """
+ self.run_tcp_transient_setup_conn_test(AF_INET, 0)
+
+ def test_2002_tcp_transient_conn_test(self):
+ """ IPv4: transient TCP session (incomplete 3WHS), ref. on egress """
+ self.run_tcp_transient_setup_conn_test(AF_INET, 1)
+
+ def test_2003_tcp_transient_conn_test(self):
+ """ IPv4: established TCP session (complete 3WHS), ref. on ingress """
+ self.run_tcp_established_conn_test(AF_INET, 0)
+
+ def test_2004_tcp_transient_conn_test(self):
+ """ IPv4: established TCP session (complete 3WHS), ref. on egress """
+ self.run_tcp_established_conn_test(AF_INET, 1)
+
+ def test_2005_tcp_transient_teardown_conn_test(self):
+ """ IPv4: transient TCP session (3WHS,ACK,FINACK), ref. on ingress """
+ self.run_tcp_transient_teardown_conn_test(AF_INET, 0)
+
+ def test_2006_tcp_transient_teardown_conn_test(self):
+ """ IPv4: transient TCP session (3WHS,ACK,FINACK), ref. on egress """
+ self.run_tcp_transient_teardown_conn_test(AF_INET, 1)
+
+ def test_3001_tcp_transient_conn_test(self):
+ """ IPv6: transient TCP session (incomplete 3WHS), ref. on ingress """
+ self.run_tcp_transient_setup_conn_test(AF_INET6, 0)
+
+ def test_3002_tcp_transient_conn_test(self):
+ """ IPv6: transient TCP session (incomplete 3WHS), ref. on egress """
+ self.run_tcp_transient_setup_conn_test(AF_INET6, 1)
+
+ def test_3003_tcp_transient_conn_test(self):
+ """ IPv6: established TCP session (complete 3WHS), ref. on ingress """
+ self.run_tcp_established_conn_test(AF_INET6, 0)
+
+ def test_3004_tcp_transient_conn_test(self):
+ """ IPv6: established TCP session (complete 3WHS), ref. on egress """
+ self.run_tcp_established_conn_test(AF_INET6, 1)
+
+ def test_3005_tcp_transient_teardown_conn_test(self):
+ """ IPv6: transient TCP session (3WHS,ACK,FINACK), ref. on ingress """
+ self.run_tcp_transient_teardown_conn_test(AF_INET6, 0)
+
+ def test_3006_tcp_transient_teardown_conn_test(self):
+ """ IPv6: transient TCP session (3WHS,ACK,FINACK), ref. on egress """
+ self.run_tcp_transient_teardown_conn_test(AF_INET6, 1)
diff --git a/test/test_acl_plugin_l2l3.py b/test/test_acl_plugin_l2l3.py
new file mode 100644
index 00000000000..48faafb7398
--- /dev/null
+++ b/test/test_acl_plugin_l2l3.py
@@ -0,0 +1,864 @@
+#!/usr/bin/env python3
+"""ACL IRB Test Case HLD:
+
+**config**
+ - L2 MAC learning enabled in l2bd
+ - 2 routed interfaces untagged, bvi (Bridge Virtual Interface)
+ - 2 bridged interfaces in l2bd with bvi
+
+**test**
+ - sending ip4 eth pkts between routed interfaces
+ - 2 routed interfaces
+ - 2 bridged interfaces
+
+ - 64B, 512B, 1518B, 9200B (ether_size)
+
+ - burst of pkts per interface
+ - 257pkts per burst
+ - routed pkts hitting different FIB entries
+ - bridged pkts hitting different MAC entries
+
+**verify**
+ - all packets received correctly
+
+"""
+
+import copy
+import unittest
+from socket import inet_pton, AF_INET, AF_INET6
+from random import choice, shuffle
+from pprint import pprint
+from ipaddress import ip_network
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP, TCP
+from scapy.layers.inet6 import IPv6, ICMPv6Unknown, ICMPv6EchoRequest
+from scapy.layers.inet6 import ICMPv6EchoReply, IPv6ExtHdrRouting
+from scapy.layers.inet6 import IPv6ExtHdrFragment
+
+from framework import VppTestCase, VppTestRunner
+from vpp_l2 import L2_PORT_TYPE
+import time
+
+from vpp_acl import AclRule, VppAcl, VppAclInterface
+
+
+class TestACLpluginL2L3(VppTestCase):
+ """TestACLpluginL2L3 Test Case"""
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ #. Create BD with MAC learning enabled and put interfaces to this BD.
+ #. Configure IPv4 addresses on loopback interface and routed interface.
+ #. Configure MAC address binding to IPv4 neighbors on loop0.
+ #. Configure MAC address on pg2.
+        #. Loopback BVI interface has remote hosts; one half of the hosts are
+           behind pg0, the other half behind pg1.
+ """
+ super(TestACLpluginL2L3, cls).setUpClass()
+
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018] # packet sizes
+ cls.bd_id = 10
+ cls.remote_hosts_count = 250
+
+ # create 3 pg interfaces, 1 loopback interface
+ cls.create_pg_interfaces(range(3))
+ cls.create_loopback_interfaces(1)
+
+ cls.interfaces = list(cls.pg_interfaces)
+ cls.interfaces.extend(cls.lo_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+
+ # Create BD with MAC learning enabled and put interfaces to this BD
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.loop0.sw_if_index, bd_id=cls.bd_id,
+ port_type=L2_PORT_TYPE.BVI)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg0.sw_if_index,
+ bd_id=cls.bd_id)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=cls.pg1.sw_if_index,
+ bd_id=cls.bd_id)
+
+ # Configure IPv4 addresses on loopback interface and routed interface
+ cls.loop0.config_ip4()
+ cls.loop0.config_ip6()
+ cls.pg2.config_ip4()
+ cls.pg2.config_ip6()
+
+ # Configure MAC address binding to IPv4 neighbors on loop0
+ cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
+ cls.loop0.configure_ipv4_neighbors()
+ cls.loop0.configure_ipv6_neighbors()
+ # configure MAC address on pg2
+ cls.pg2.resolve_arp()
+ cls.pg2.resolve_ndp()
+
+ cls.WITHOUT_EH = False
+ cls.WITH_EH = True
+ cls.STATELESS_ICMP = False
+ cls.STATEFUL_ICMP = True
+
+        # Loopback BVI interface has remote hosts; one half of the hosts are
+        # behind pg0, the other half behind pg1
+ half = cls.remote_hosts_count // 2
+ cls.pg0.remote_hosts = cls.loop0.remote_hosts[:half]
+ cls.pg1.remote_hosts = cls.loop0.remote_hosts[half:]
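+        # publish per-ACL match counters into the stats segment so the tests
+        # can read them back in verify_acl_packet_count()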
+ reply = cls.vapi.papi.acl_stats_intf_counters_enable(enable=1)
+
+ @classmethod
+ def tearDownClass(cls):
+ reply = cls.vapi.papi.acl_stats_intf_counters_enable(enable=0)
+ super(TestACLpluginL2L3, cls).tearDownClass()
+
+ def tearDown(self):
+ """Run standard test teardown and log ``show l2patch``,
+ ``show l2fib verbose``,``show bridge-domain <bd_id> detail``,
+ ``show ip neighbors``.
+ """
+ super(TestACLpluginL2L3, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show l2patch"))
+ self.logger.info(self.vapi.cli("show classify tables"))
+ self.logger.info(self.vapi.cli("show l2fib verbose"))
+ self.logger.info(self.vapi.cli("show bridge-domain %s detail" %
+ self.bd_id))
+ self.logger.info(self.vapi.cli("show ip neighbors"))
+ cmd = "show acl-plugin sessions verbose 1"
+ self.logger.info(self.vapi.cli(cmd))
+ self.logger.info(self.vapi.cli("show acl-plugin acl"))
+ self.logger.info(self.vapi.cli("show acl-plugin interface"))
+ self.logger.info(self.vapi.cli("show acl-plugin tables"))
+
+ def create_stream(self, src_ip_if, dst_ip_if, reverse, packet_sizes,
+ is_ip6, expect_blocked, expect_established,
+ add_extension_header, icmp_stateful=False):
+ pkts = []
+ rules = []
+ permit_rules = []
+ permit_and_reflect_rules = []
+ total_packet_count = 8
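+        # Pairs of packets alternate between UDP (i//2 even) and ICMP
+        # (i//2 odd); within each pair the odd index is the "permit" packet.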
+ for i in range(0, total_packet_count):
+ modulo = (i//2) % 2
+ icmp_type_delta = i % 2
+ icmp_code = i
+ is_udp_packet = (modulo == 0)
+ if is_udp_packet and icmp_stateful:
+ continue
+ is_reflectable_icmp = (icmp_stateful and icmp_type_delta == 0 and
+ not is_udp_packet)
+ is_reflected_icmp = is_reflectable_icmp and expect_established
+ can_reflect_this_packet = is_udp_packet or is_reflectable_icmp
+ is_permit = i % 2
+ remote_dst_index = i % len(dst_ip_if.remote_hosts)
+ remote_dst_host = dst_ip_if.remote_hosts[remote_dst_index]
+ if is_permit == 1:
+ info = self.create_packet_info(src_ip_if, dst_ip_if)
+ payload = self.info_to_payload(info)
+ else:
+ to_be_blocked = False
+ if (expect_blocked and not expect_established):
+ to_be_blocked = True
+ if (not can_reflect_this_packet):
+ to_be_blocked = True
+ if to_be_blocked:
+ payload = "to be blocked"
+ else:
+ info = self.create_packet_info(src_ip_if, dst_ip_if)
+ payload = self.info_to_payload(info)
+ if reverse:
+ dst_mac = 'de:ad:00:00:00:00'
+ src_mac = remote_dst_host._mac
+ dst_ip6 = src_ip_if.remote_ip6
+ src_ip6 = remote_dst_host.ip6
+ dst_ip4 = src_ip_if.remote_ip4
+ src_ip4 = remote_dst_host.ip4
+ dst_l4 = 1234 + i
+ src_l4 = 4321 + i
+ else:
+ dst_mac = src_ip_if.local_mac
+ src_mac = src_ip_if.remote_mac
+ src_ip6 = src_ip_if.remote_ip6
+ dst_ip6 = remote_dst_host.ip6
+ src_ip4 = src_ip_if.remote_ip4
+ dst_ip4 = remote_dst_host.ip4
+ src_l4 = 1234 + i
+ dst_l4 = 4321 + i
+ if is_reflected_icmp:
+ icmp_type_delta = 1
+
+ # default ULP should be something we do not use in tests
+ ulp_l4 = TCP(sport=src_l4, dport=dst_l4)
+ # potentially a chain of protocols leading to ULP
+ ulp = ulp_l4
+
+ if is_udp_packet:
+ if is_ip6:
+ ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
+ if add_extension_header:
+ # prepend some extension headers
+ ulp = (IPv6ExtHdrRouting() / IPv6ExtHdrRouting() /
+ IPv6ExtHdrFragment(offset=0, m=1) / ulp_l4)
+ # uncomment below to test invalid ones
+ # ulp = IPv6ExtHdrRouting(len = 200) / ulp_l4
+ else:
+ ulp = ulp_l4
+ p = (Ether(dst=dst_mac, src=src_mac) /
+ IPv6(src=src_ip6, dst=dst_ip6) /
+ ulp /
+ Raw(payload))
+ else:
+ ulp_l4 = UDP(sport=src_l4, dport=dst_l4)
+                    # IPv4 has no extension headers, so instead we mark the
+                    # packet as a first fragment (MF set, offset 0)
+ flags = 1 if add_extension_header else 0
+ ulp = ulp_l4
+ p = (Ether(dst=dst_mac, src=src_mac) /
+ IP(src=src_ip4, dst=dst_ip4, frag=0, flags=flags) /
+ ulp /
+ Raw(payload))
+ elif modulo == 1:
+ if is_ip6:
+ ulp_l4 = ICMPv6Unknown(type=128 + icmp_type_delta,
+ code=icmp_code)
+ ulp = ulp_l4
+ p = (Ether(dst=dst_mac, src=src_mac) /
+ IPv6(src=src_ip6, dst=dst_ip6) /
+ ulp /
+ Raw(payload))
+ else:
+ ulp_l4 = ICMP(type=8 - 8*icmp_type_delta, code=icmp_code)
+ ulp = ulp_l4
+ p = (Ether(dst=dst_mac, src=src_mac) /
+ IP(src=src_ip4, dst=dst_ip4) /
+ ulp /
+ Raw(payload))
+
+ if i % 2 == 1:
+ info.data = p.copy()
+ size = packet_sizes[(i // 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+ pkts.append(p)
+
+ rule_family = AF_INET6 if p.haslayer(IPv6) else AF_INET
+ rule_prefix_len = 128 if p.haslayer(IPv6) else 32
+ rule_l3_layer = IPv6 if p.haslayer(IPv6) else IP
+
+ if p.haslayer(UDP):
+ rule_l4_sport = p[UDP].sport
+ rule_l4_dport = p[UDP].dport
+ else:
+ if p.haslayer(ICMP):
+ rule_l4_sport = p[ICMP].type
+ rule_l4_dport = p[ICMP].code
+ else:
+ rule_l4_sport = p[ICMPv6Unknown].type
+ rule_l4_dport = p[ICMPv6Unknown].code
+ if p.haslayer(IPv6):
+ rule_l4_proto = ulp_l4.overload_fields[IPv6]['nh']
+ else:
+ rule_l4_proto = p[IP].proto
+
+ new_rule = AclRule(is_permit=is_permit, proto=rule_l4_proto,
+ src_prefix=ip_network(
+ (p[rule_l3_layer].src, rule_prefix_len)),
+ dst_prefix=ip_network(
+ (p[rule_l3_layer].dst, rule_prefix_len)),
+ sport_from=rule_l4_sport,
+ sport_to=rule_l4_sport,
+ dport_from=rule_l4_dport,
+ dport_to=rule_l4_dport)
+
+ rules.append(new_rule)
+ new_rule_permit = copy.copy(new_rule)
+ new_rule_permit.is_permit = 1
+ permit_rules.append(new_rule_permit)
+
+ new_rule_permit_and_reflect = copy.copy(new_rule)
+ if can_reflect_this_packet:
+ new_rule_permit_and_reflect.is_permit = 2
+ else:
+ new_rule_permit_and_reflect.is_permit = is_permit
+
+ permit_and_reflect_rules.append(new_rule_permit_and_reflect)
+ self.logger.info("create_stream pkt#%d: %s" % (i, payload))
+
+ return {'stream': pkts,
+ 'rules': rules,
+ 'permit_rules': permit_rules,
+ 'permit_and_reflect_rules': permit_and_reflect_rules}
+
+ def verify_capture(self, dst_ip_if, src_ip_if, capture, reverse):
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+
+ dst_ip_sw_if_index = dst_ip_if.sw_if_index
+
+ for packet in capture:
+ l3 = IP if packet.haslayer(IP) else IPv6
+ ip = packet[l3]
+ if packet.haslayer(UDP):
+ l4 = UDP
+ else:
+ if packet.haslayer(ICMP):
+ l4 = ICMP
+ else:
+ l4 = ICMPv6Unknown
+
+ # Scapy IPv6 stuff is too smart for its own good.
+ # So we do this and coerce the ICMP into unknown type
+ if packet.haslayer(UDP):
+ data = scapy.compat.raw(packet[UDP][Raw])
+ else:
+ if l3 == IP:
+ data = scapy.compat.raw(ICMP(
+ scapy.compat.raw(packet[l3].payload))[Raw])
+ else:
+ data = scapy.compat.raw(ICMPv6Unknown(
+ scapy.compat.raw(packet[l3].payload)).msgbody)
+ udp_or_icmp = packet[l3].payload
+ data_obj = Raw(data)
+            # FIXME: make the framework believe we are passing an object
+ payload_info = self.payload_to_info(data_obj)
+ packet_index = payload_info.index
+
+ self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
+
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_ip_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ self.assertTrue(next_info is not None)
+
+ # MAC: src, dst
+ if not reverse:
+ self.assertEqual(packet.src, dst_ip_if.local_mac)
+ host = dst_ip_if.host_by_mac(packet.dst)
+
+ # IP: src, dst
+ # self.assertEqual(ip.src, src_ip_if.remote_ip4)
+ if saved_packet is not None:
+ self.assertEqual(ip.src, saved_packet[l3].src)
+ self.assertEqual(ip.dst, saved_packet[l3].dst)
+ if l4 == UDP:
+ self.assertEqual(udp_or_icmp.sport, saved_packet[l4].sport)
+ self.assertEqual(udp_or_icmp.dport, saved_packet[l4].dport)
+ # self.assertEqual(ip.dst, host.ip4)
+
+ # UDP:
+
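+    # Stress ACL (re)application: build extra ACLs from shuffled copies of the
+    # applied rules, prepend/append them, reshuffle them a few times, then
+    # restore the original interface ACL list.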
+ def applied_acl_shuffle(self, acl_if):
+ saved_n_input = acl_if.n_input
+        # TODO: maybe copy each one?
+ saved_acls = acl_if.acls
+
+ # now create a list of all the rules in all ACLs
+ all_rules = []
+ for old_acl in saved_acls:
+ for rule in old_acl.rules:
+ all_rules.append(rule)
+
+ # Add a few ACLs made from shuffled rules
+ shuffle(all_rules)
+ acl1 = VppAcl(self, rules=all_rules[::2], tag="shuffle 1. acl")
+ acl1.add_vpp_config()
+
+ shuffle(all_rules)
+ acl2 = VppAcl(self, rules=all_rules[::3], tag="shuffle 2. acl")
+ acl2.add_vpp_config()
+
+ shuffle(all_rules)
+ acl3 = VppAcl(self, rules=all_rules[::2], tag="shuffle 3. acl")
+ acl3.add_vpp_config()
+
+ # apply the shuffle ACLs in front
+ input_acls = [acl1, acl2]
+ output_acls = [acl1, acl2]
+
+ # add the currently applied ACLs
+ n_input = acl_if.n_input
+ input_acls.extend(saved_acls[:n_input])
+ output_acls.extend(saved_acls[n_input:])
+
+ # and the trailing shuffle ACL(s)
+ input_acls.extend([acl3])
+ output_acls.extend([acl3])
+
+ # set the interface ACL list to the result
+ acl_if.n_input = len(input_acls)
+ acl_if.acls = input_acls + output_acls
+ acl_if.add_vpp_config()
+
+ # change the ACLs a few times
+ for i in range(1, 10):
+ shuffle(all_rules)
+ acl1.modify_vpp_config(all_rules[::1+(i % 2)])
+
+ shuffle(all_rules)
+ acl2.modify_vpp_config(all_rules[::1+(i % 3)])
+
+ shuffle(all_rules)
+ acl3.modify_vpp_config(all_rules[::1+(i % 5)])
+
+ # restore to how it was before and clean up
+ acl_if.n_input = saved_n_input
+ acl_if.acls = saved_acls
+ acl_if.add_vpp_config()
+
+ acl1.remove_vpp_config()
+ acl2.remove_vpp_config()
+ acl3.remove_vpp_config()
+
+ def create_acls_for_a_stream(self, stream_dict,
+ test_l2_action, is_reflect):
+ r = stream_dict['rules']
+ r_permit = stream_dict['permit_rules']
+ r_permit_reflect = stream_dict['permit_and_reflect_rules']
+ r_action = r_permit_reflect if is_reflect else r
+ action_acl = VppAcl(self, rules=r_action, tag="act. acl")
+ action_acl.add_vpp_config()
+ permit_acl = VppAcl(self, rules=r_permit, tag="perm. acl")
+ permit_acl.add_vpp_config()
+
+ return {'L2': action_acl if test_l2_action else permit_acl,
+ 'L3': permit_acl if test_l2_action else action_acl,
+ 'permit': permit_acl, 'action': action_acl}
+
+ def apply_acl_ip46_x_to_y(self, bridged_to_routed, test_l2_deny,
+ is_ip6, is_reflect, add_eh):
+ """ Apply the ACLs
+ """
+ self.reset_packet_infos()
+ stream_dict = self.create_stream(
+ self.pg2, self.loop0,
+ bridged_to_routed,
+ self.pg_if_packet_sizes, is_ip6,
+ not is_reflect, False, add_eh)
+ stream = stream_dict['stream']
+ acl_idx = self.create_acls_for_a_stream(stream_dict, test_l2_deny,
+ is_reflect)
+ n_input_l3 = 0 if bridged_to_routed else 1
+ n_input_l2 = 1 if bridged_to_routed else 0
+
+ acl_if_pg2 = VppAclInterface(self, sw_if_index=self.pg2.sw_if_index,
+ n_input=n_input_l3, acls=[acl_idx['L3']])
+ acl_if_pg2.add_vpp_config()
+
+ acl_if_pg0 = VppAclInterface(self, sw_if_index=self.pg0.sw_if_index,
+ n_input=n_input_l2, acls=[acl_idx['L2']])
+ acl_if_pg0.add_vpp_config()
+
+ acl_if_pg1 = VppAclInterface(self, sw_if_index=self.pg1.sw_if_index,
+ n_input=n_input_l2, acls=[acl_idx['L2']])
+ acl_if_pg1.add_vpp_config()
+
+ self.applied_acl_shuffle(acl_if_pg0)
+ self.applied_acl_shuffle(acl_if_pg1)
+ return {'L2': acl_idx['L2'], 'L3': acl_idx['L3']}
+
+ def apply_acl_ip46_both_directions_reflect(self,
+ primary_is_bridged_to_routed,
+ reflect_on_l2, is_ip6, add_eh,
+ stateful_icmp):
+ primary_is_routed_to_bridged = not primary_is_bridged_to_routed
+ self.reset_packet_infos()
+ stream_dict_fwd = self.create_stream(self.pg2, self.loop0,
+ primary_is_bridged_to_routed,
+ self.pg_if_packet_sizes, is_ip6,
+ False, False, add_eh,
+ stateful_icmp)
+ acl_idx_fwd = self.create_acls_for_a_stream(stream_dict_fwd,
+ reflect_on_l2, True)
+
+ stream_dict_rev = self.create_stream(self.pg2, self.loop0,
+ not primary_is_bridged_to_routed,
+ self.pg_if_packet_sizes, is_ip6,
+ True, True, add_eh, stateful_icmp)
+ # We want the primary action to be "deny" rather than reflect
+ acl_idx_rev = self.create_acls_for_a_stream(stream_dict_rev,
+ reflect_on_l2, False)
+
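+        # Select which ACL set faces each direction: the reflecting ("fwd")
+        # ACLs cover the primary direction, the deny ("rev") ACLs the return
+        # direction, on both the L2 and the L3 interfaces.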
+ if primary_is_bridged_to_routed:
+ inbound_l2_acl = acl_idx_fwd['L2']
+ else:
+ inbound_l2_acl = acl_idx_rev['L2']
+
+ if primary_is_routed_to_bridged:
+ outbound_l2_acl = acl_idx_fwd['L2']
+ else:
+ outbound_l2_acl = acl_idx_rev['L2']
+
+ if primary_is_routed_to_bridged:
+ inbound_l3_acl = acl_idx_fwd['L3']
+ else:
+ inbound_l3_acl = acl_idx_rev['L3']
+
+ if primary_is_bridged_to_routed:
+ outbound_l3_acl = acl_idx_fwd['L3']
+ else:
+ outbound_l3_acl = acl_idx_rev['L3']
+
+ acl_if_pg2 = VppAclInterface(self, sw_if_index=self.pg2.sw_if_index,
+ n_input=1,
+ acls=[inbound_l3_acl, outbound_l3_acl])
+ acl_if_pg2.add_vpp_config()
+
+ acl_if_pg0 = VppAclInterface(self, sw_if_index=self.pg0.sw_if_index,
+ n_input=1,
+ acls=[inbound_l2_acl, outbound_l2_acl])
+ acl_if_pg0.add_vpp_config()
+
+ acl_if_pg1 = VppAclInterface(self, sw_if_index=self.pg1.sw_if_index,
+ n_input=1,
+ acls=[inbound_l2_acl, outbound_l2_acl])
+ acl_if_pg1.add_vpp_config()
+
+ self.applied_acl_shuffle(acl_if_pg0)
+ self.applied_acl_shuffle(acl_if_pg2)
+
+ def apply_acl_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
+ is_reflect, add_eh):
+ return self.apply_acl_ip46_x_to_y(False, test_l2_deny, is_ip6,
+ is_reflect, add_eh)
+
+ def apply_acl_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
+ is_reflect, add_eh):
+ return self.apply_acl_ip46_x_to_y(True, test_l2_deny, is_ip6,
+ is_reflect, add_eh)
+
+ def verify_acl_packet_count(self, acl_idx, packet_count):
+ matches = self.statistics.get_counter('/acl/%d/matches' % acl_idx)
+ self.logger.info("stat seg for ACL %d: %s" % (acl_idx, repr(matches)))
+ total_count = 0
+ for m in matches:
+ for p in m:
+ total_count = total_count + p['packets']
+ self.assertEqual(total_count, packet_count)
+
+ def run_traffic_ip46_x_to_y(self, bridged_to_routed,
+ test_l2_deny, is_ip6,
+ is_reflect, is_established, add_eh,
+ stateful_icmp=False):
+ self.reset_packet_infos()
+ stream_dict = self.create_stream(self.pg2, self.loop0,
+ bridged_to_routed,
+ self.pg_if_packet_sizes, is_ip6,
+ not is_reflect, is_established,
+ add_eh, stateful_icmp)
+ stream = stream_dict['stream']
+
+ tx_if = self.pg0 if bridged_to_routed else self.pg2
+ rx_if = self.pg2 if bridged_to_routed else self.pg0
+
+ tx_if.add_stream(stream)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ packet_count = self.get_packet_count_for_if_idx(self.loop0.sw_if_index)
+ rcvd1 = rx_if.get_capture(packet_count)
+ self.verify_capture(self.loop0, self.pg2, rcvd1, bridged_to_routed)
+ return len(stream)
+
+ def run_traffic_ip46_routed_to_bridged(self, test_l2_deny, is_ip6,
+ is_reflect, is_established, add_eh,
+ stateful_icmp=False):
+ return self.run_traffic_ip46_x_to_y(False, test_l2_deny, is_ip6,
+ is_reflect, is_established, add_eh,
+ stateful_icmp)
+
+ def run_traffic_ip46_bridged_to_routed(self, test_l2_deny, is_ip6,
+ is_reflect, is_established, add_eh,
+ stateful_icmp=False):
+ return self.run_traffic_ip46_x_to_y(True, test_l2_deny, is_ip6,
+ is_reflect, is_established, add_eh,
+ stateful_icmp)
+
+ def run_test_ip46_routed_to_bridged(self, test_l2_deny,
+ is_ip6, is_reflect, add_eh):
+ acls = self.apply_acl_ip46_routed_to_bridged(test_l2_deny,
+ is_ip6, is_reflect,
+ add_eh)
+ pkts = self.run_traffic_ip46_routed_to_bridged(test_l2_deny, is_ip6,
+ is_reflect, False,
+ add_eh)
+ self.verify_acl_packet_count(acls['L3'].acl_index, pkts)
+
+ def run_test_ip46_bridged_to_routed(self, test_l2_deny,
+ is_ip6, is_reflect, add_eh):
+ acls = self.apply_acl_ip46_bridged_to_routed(test_l2_deny,
+ is_ip6, is_reflect,
+ add_eh)
+ pkts = self.run_traffic_ip46_bridged_to_routed(test_l2_deny, is_ip6,
+ is_reflect, False,
+ add_eh)
+ self.verify_acl_packet_count(acls['L2'].acl_index, pkts)
+
+ def run_test_ip46_routed_to_bridged_and_back(self, test_l2_action,
+ is_ip6, add_eh,
+ stateful_icmp=False):
+ self.apply_acl_ip46_both_directions_reflect(False, test_l2_action,
+ is_ip6, add_eh,
+ stateful_icmp)
+ self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
+ True, False, add_eh,
+ stateful_icmp)
+ self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
+ False, True, add_eh,
+ stateful_icmp)
+
+ def run_test_ip46_bridged_to_routed_and_back(self, test_l2_action,
+ is_ip6, add_eh,
+ stateful_icmp=False):
+ self.apply_acl_ip46_both_directions_reflect(True, test_l2_action,
+ is_ip6, add_eh,
+ stateful_icmp)
+ self.run_traffic_ip46_bridged_to_routed(test_l2_action, is_ip6,
+ True, False, add_eh,
+ stateful_icmp)
+ self.run_traffic_ip46_routed_to_bridged(test_l2_action, is_ip6,
+ False, True, add_eh,
+ stateful_icmp)
+
+ def test_0000_ip6_irb_1(self):
+ """ ACL plugin prepare"""
+ if not self.vpp_dead:
+ cmd = "set acl-plugin session timeout udp idle 2000"
+ self.logger.info(self.vapi.ppcli(cmd))
+ # uncomment to not skip past the routing header
+ # and watch the EH tests fail
+ # self.logger.info(self.vapi.ppcli(
+ # "set acl-plugin skip-ipv6-extension-header 43 0"))
+ # uncomment to test the session limit (stateful tests will fail)
+ # self.logger.info(self.vapi.ppcli(
+ # "set acl-plugin session table max-entries 1"))
+ # new datapath is the default, but just in case
+ # self.logger.info(self.vapi.ppcli(
+ # "set acl-plugin l2-datapath new"))
+ # If you want to see some tests fail, uncomment the next line
+ # self.logger.info(self.vapi.ppcli(
+ # "set acl-plugin l2-datapath old"))
+
+ def test_0001_ip6_irb_1(self):
+ """ ACL IPv6 routed -> bridged, L2 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(True, True, False,
+ self.WITHOUT_EH)
+
+ def test_0002_ip6_irb_1(self):
+ """ ACL IPv6 routed -> bridged, L3 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(False, True, False,
+ self.WITHOUT_EH)
+
+ def test_0003_ip4_irb_1(self):
+ """ ACL IPv4 routed -> bridged, L2 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(True, False, False,
+ self.WITHOUT_EH)
+
+ def test_0004_ip4_irb_1(self):
+ """ ACL IPv4 routed -> bridged, L3 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(False, False, False,
+ self.WITHOUT_EH)
+
+ def test_0005_ip6_irb_1(self):
+ """ ACL IPv6 bridged -> routed, L2 ACL deny """
+ self.run_test_ip46_bridged_to_routed(True, True, False,
+ self.WITHOUT_EH)
+
+ def test_0006_ip6_irb_1(self):
+ """ ACL IPv6 bridged -> routed, L3 ACL deny """
+ self.run_test_ip46_bridged_to_routed(False, True, False,
+ self.WITHOUT_EH)
+
+ def test_0007_ip6_irb_1(self):
+ """ ACL IPv4 bridged -> routed, L2 ACL deny """
+ self.run_test_ip46_bridged_to_routed(True, False, False,
+ self.WITHOUT_EH)
+
+ def test_0008_ip6_irb_1(self):
+ """ ACL IPv4 bridged -> routed, L3 ACL deny """
+ self.run_test_ip46_bridged_to_routed(False, False, False,
+ self.WITHOUT_EH)
+
+ # Stateful ACL tests
+ def test_0101_ip6_irb_1(self):
+ """ ACL IPv6 routed -> bridged, L2 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, True,
+ self.WITHOUT_EH)
+
+ def test_0102_ip6_irb_1(self):
+ """ ACL IPv6 bridged -> routed, L2 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, True,
+ self.WITHOUT_EH)
+
+ def test_0103_ip6_irb_1(self):
+ """ ACL IPv4 routed -> bridged, L2 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, False,
+ self.WITHOUT_EH)
+
+ def test_0104_ip6_irb_1(self):
+ """ ACL IPv4 bridged -> routed, L2 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, False,
+ self.WITHOUT_EH)
+
+ def test_0111_ip6_irb_1(self):
+ """ ACL IPv6 routed -> bridged, L3 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, True,
+ self.WITHOUT_EH)
+
+ def test_0112_ip6_irb_1(self):
+ """ ACL IPv6 bridged -> routed, L3 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, True,
+ self.WITHOUT_EH)
+
+ def test_0113_ip6_irb_1(self):
+ """ ACL IPv4 routed -> bridged, L3 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, False,
+ self.WITHOUT_EH)
+
+ def test_0114_ip6_irb_1(self):
+ """ ACL IPv4 bridged -> routed, L3 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, False,
+ self.WITHOUT_EH)
+
+ # A block of tests with extension headers
+
+ def test_1001_ip6_irb_1(self):
+ """ ACL IPv6+EH routed -> bridged, L2 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(True, True, False,
+ self.WITH_EH)
+
+ def test_1002_ip6_irb_1(self):
+ """ ACL IPv6+EH routed -> bridged, L3 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(False, True, False,
+ self.WITH_EH)
+
+ def test_1005_ip6_irb_1(self):
+ """ ACL IPv6+EH bridged -> routed, L2 ACL deny """
+ self.run_test_ip46_bridged_to_routed(True, True, False,
+ self.WITH_EH)
+
+ def test_1006_ip6_irb_1(self):
+ """ ACL IPv6+EH bridged -> routed, L3 ACL deny """
+ self.run_test_ip46_bridged_to_routed(False, True, False,
+ self.WITH_EH)
+
+ def test_1101_ip6_irb_1(self):
+ """ ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, True,
+ self.WITH_EH)
+
+ def test_1102_ip6_irb_1(self):
+ """ ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, True,
+ self.WITH_EH)
+
+ def test_1111_ip6_irb_1(self):
+ """ ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, True,
+ self.WITH_EH)
+
+ def test_1112_ip6_irb_1(self):
+ """ ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, True,
+ self.WITH_EH)
+
+ # IPv4 with "MF" bit set
+
+ def test_1201_ip6_irb_1(self):
+ """ ACL IPv4+MF routed -> bridged, L2 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(True, False, False,
+ self.WITH_EH)
+
+ def test_1202_ip6_irb_1(self):
+ """ ACL IPv4+MF routed -> bridged, L3 ACL deny"""
+ self.run_test_ip46_routed_to_bridged(False, False, False,
+ self.WITH_EH)
+
+ def test_1205_ip6_irb_1(self):
+ """ ACL IPv4+MF bridged -> routed, L2 ACL deny """
+ self.run_test_ip46_bridged_to_routed(True, False, False,
+ self.WITH_EH)
+
+ def test_1206_ip6_irb_1(self):
+ """ ACL IPv4+MF bridged -> routed, L3 ACL deny """
+ self.run_test_ip46_bridged_to_routed(False, False, False,
+ self.WITH_EH)
+
+ def test_1301_ip6_irb_1(self):
+ """ ACL IPv4+MF routed -> bridged, L2 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, False,
+ self.WITH_EH)
+
+ def test_1302_ip6_irb_1(self):
+ """ ACL IPv4+MF bridged -> routed, L2 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, False,
+ self.WITH_EH)
+
+ def test_1311_ip6_irb_1(self):
+ """ ACL IPv4+MF routed -> bridged, L3 ACL permit+reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, False,
+ self.WITH_EH)
+
+ def test_1312_ip6_irb_1(self):
+ """ ACL IPv4+MF bridged -> routed, L3 ACL permit+reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, False,
+ self.WITH_EH)
+
+    # Stateful ACL tests with stateful ICMP
+
+ def test_1401_ip6_irb_1(self):
+ """ IPv6 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, True,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1402_ip6_irb_1(self):
+ """ IPv6 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, True,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1403_ip4_irb_1(self):
+ """ IPv4 routed -> bridged, L2 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(True, False,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1404_ip4_irb_1(self):
+ """ IPv4 bridged -> routed, L2 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(True, False,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1411_ip6_irb_1(self):
+ """ IPv6 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, True,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1412_ip6_irb_1(self):
+ """ IPv6 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, True,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1413_ip4_irb_1(self):
+ """ IPv4 routed -> bridged, L3 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_routed_to_bridged_and_back(False, False,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+ def test_1414_ip4_irb_1(self):
+ """ IPv4 bridged -> routed, L3 ACL permit+reflect, ICMP reflect"""
+ self.run_test_ip46_bridged_to_routed_and_back(False, False,
+ self.WITHOUT_EH,
+ self.STATEFUL_ICMP)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_acl_plugin_macip.py b/test/test_acl_plugin_macip.py
new file mode 100644
index 00000000000..5edd7b03258
--- /dev/null
+++ b/test/test_acl_plugin_macip.py
@@ -0,0 +1,1278 @@
+#!/usr/bin/env python3
+"""ACL plugin - MACIP tests
+"""
+import binascii
+import ipaddress
+import random
+from socket import inet_ntop, inet_pton, AF_INET, AF_INET6
+from struct import pack, unpack
+import re
+import unittest
+from ipaddress import ip_network, IPv4Network, IPv6Network
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from vpp_lo_interface import VppLoInterface
+from vpp_l2 import L2_PORT_TYPE
+from vpp_sub_interface import L2_VTR_OP, VppSubInterface, VppDot1QSubint, \
+ VppDot1ADSubint
+from vpp_acl import AclRule, VppAcl, VppAclInterface, VppEtypeWhitelist, \
+ VppMacipAclInterface, VppMacipAcl, MacipRule
+from vpp_papi import MACAddress
+
+
+class MethodHolder(VppTestCase):
+ DEBUG = False
+
+ BRIDGED = True
+ ROUTED = False
+
+ IS_IP4 = False
+ IS_IP6 = True
+
+ DOT1AD = "dot1ad"
+ DOT1Q = "dot1q"
+ PERMIT_TAGS = True
+ DENY_TAGS = False
+
+ # rule types
+ DENY = 0
+ PERMIT = 1
+
+ # ACL types
+ EXACT_IP = 1
+ SUBNET_IP = 2
+ WILD_IP = 3
+
+ EXACT_MAC = 1
+ WILD_MAC = 2
+ OUI_MAC = 3
+
+ ACLS = []
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(MethodHolder, cls).setUpClass()
+
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018] # packet sizes
+ cls.bd_id = 111
+ cls.remote_hosts_count = 200
+
+ try:
+ # create 4 pg interfaces, 1 loopback interface
+ cls.create_pg_interfaces(range(4))
+ cls.create_loopback_interfaces(1)
+
+            # create 4 subinterfaces
+ cls.subifs = [
+ VppDot1QSubint(cls, cls.pg1, 10),
+ VppDot1ADSubint(cls, cls.pg2, 20, 300, 400),
+ VppDot1QSubint(cls, cls.pg3, 30),
+ VppDot1ADSubint(cls, cls.pg3, 40, 600, 700)]
+
+ cls.subifs[0].set_vtr(L2_VTR_OP.L2_POP_1,
+ inner=10, push1q=1)
+ cls.subifs[1].set_vtr(L2_VTR_OP.L2_POP_2,
+ outer=300, inner=400, push1q=1)
+ cls.subifs[2].set_vtr(L2_VTR_OP.L2_POP_1,
+ inner=30, push1q=1)
+ cls.subifs[3].set_vtr(L2_VTR_OP.L2_POP_2,
+ outer=600, inner=700, push1q=1)
+
+ cls.interfaces = list(cls.pg_interfaces)
+ cls.interfaces.extend(cls.lo_interfaces)
+ cls.interfaces.extend(cls.subifs)
+
+ for i in cls.interfaces:
+ i.admin_up()
+
+ # Create BD with MAC learning enabled and put interfaces to this BD
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.loop0.sw_if_index, bd_id=cls.bd_id,
+ port_type=L2_PORT_TYPE.BVI)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg0.sw_if_index, bd_id=cls.bd_id)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.bd_id)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.subifs[0].sw_if_index, bd_id=cls.bd_id)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.subifs[1].sw_if_index, bd_id=cls.bd_id)
+
+ # Configure IPv4/6 addresses on loop interface and routed interface
+ cls.loop0.config_ip4()
+ cls.loop0.config_ip6()
+ cls.pg2.config_ip4()
+ cls.pg2.config_ip6()
+ cls.pg3.config_ip4()
+ cls.pg3.config_ip6()
+
+ # Configure MAC address binding to IPv4 neighbors on loop0
+ cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
+ # Modify host mac addresses to have different OUI parts
+ for i in range(2, cls.remote_hosts_count + 2):
+ mac = cls.loop0.remote_hosts[i-2]._mac.split(':')
+ mac[2] = format(int(mac[2], 16) + i, "02x")
+ cls.loop0.remote_hosts[i - 2]._mac = ":".join(mac)
+
+ cls.loop0.configure_ipv4_neighbors()
+ cls.loop0.configure_ipv6_neighbors()
+
+            # resolve ARP and NDP on pg3 to learn remote MAC addresses
+ cls.pg3.resolve_arp()
+ cls.pg3.resolve_ndp()
+
+            # configure IP addresses and resolve ARP on the subinterfaces
+ for i in cls.subifs:
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+
+            # resolve ARP and NDP on pg2 to learn remote MAC addresses
+ cls.pg2.resolve_arp()
+ cls.pg2.resolve_ndp()
+
+            # The loopback BVI interface owns the remote hosts:
+            # the first half is behind pg0, the second half behind the
+            # pg1/pg2/pg3 subinterfaces
+ cls.pg0.remote_hosts = cls.loop0.remote_hosts[:100]
+ cls.subifs[0].remote_hosts = cls.loop0.remote_hosts[100:125]
+ cls.subifs[1].remote_hosts = cls.loop0.remote_hosts[125:150]
+ cls.subifs[2].remote_hosts = cls.loop0.remote_hosts[150:175]
+ cls.subifs[3].remote_hosts = cls.loop0.remote_hosts[175:]
+
+ except Exception:
+ super(MethodHolder, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(MethodHolder, cls).tearDownClass()
+
+ def setUp(self):
+ super(MethodHolder, self).setUp()
+ self.reset_packet_infos()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.ppcli("show interface address"))
+ self.logger.info(self.vapi.ppcli("show hardware"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip acl"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip interface"))
+ self.logger.info(self.vapi.ppcli("sh classify tables verbose"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin acl"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin interface"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin tables"))
+ # print(self.vapi.ppcli("show interface address"))
+ # print(self.vapi.ppcli("show hardware"))
+ # print(self.vapi.ppcli("sh acl-plugin macip interface"))
+ # print(self.vapi.ppcli("sh acl-plugin macip acl"))
+
+ def macip_acl_dump_debug(self):
+ acls = self.vapi.macip_acl_dump()
+ if self.DEBUG:
+ for acl in acls:
+ # print("ACL #"+str(acl.acl_index))
+ for r in acl.r:
+ rule = "ACTION"
+ if r.is_permit == 1:
+ rule = "PERMIT"
+ elif r.is_permit == 0:
+ rule = "DENY "
+ """
+ print(" IP6" if r.is_ipv6 else " IP4",
+ rule,
+ binascii.hexlify(r.src_mac),
+ binascii.hexlify(r.src_mac_mask),
+ unpack('<16B', r.src_ip_addr),
+ r.src_ip_prefix_len)
+ """
+ return acls
+
+ def create_rules(self, mac_type=EXACT_MAC, ip_type=EXACT_IP,
+ acl_count=1, rules_count=None):
+ acls = []
+ if rules_count is None:
+ rules_count = [1]
+ src_mac = int("220000dead00", 16)
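+        # build 2 * acl_count rule sets, alternating IPv4 (even acl index)
+        # and IPv6 (odd acl index); each v4/v6 pair shares one rules_count
+        # entry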
+ for acl in range(2, (acl_count+1) * 2):
+ rules = []
+ host = random.choice(self.loop0.remote_hosts)
+ is_ip6 = acl % 2
+ ip4 = host.ip4.split('.')
+ ip6 = list(unpack('<16B', inet_pton(AF_INET6, host.ip6)))
+
+ if ip_type == self.EXACT_IP:
+ prefix_len4 = 32
+ prefix_len6 = 128
+ elif ip_type == self.WILD_IP:
+ ip4 = [0, 0, 0, 0]
+ ip6 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ prefix_len4 = 0
+ prefix_len6 = 0
+ rules_count[int((acl / 2) - 1)] = 1
+ else:
+ prefix_len4 = 24
+ prefix_len6 = 64
+
+ if mac_type == self.EXACT_MAC:
+ mask = "ff:ff:ff:ff:ff:ff"
+ elif mac_type == self.WILD_MAC:
+ mask = "00:00:00:00:00:00"
+ elif mac_type == self.OUI_MAC:
+ mask = "ff:ff:ff:00:00:00"
+ else:
+ mask = "ff:ff:ff:ff:ff:00"
+
+ ip = ip6 if is_ip6 else ip4
+ ip_len = prefix_len6 if is_ip6 else prefix_len4
+
+ for i in range(0, (rules_count[int((acl / 2) - 1)])):
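+                # 16777217 == 0x01000001: bump the last OUI octet and the
+                # final octet so every rule gets a distinct MAC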
+ src_mac += 16777217
+ if mac_type == self.WILD_MAC:
+ mac = "00:00:00:00:00:00"
+ elif mac_type == self.OUI_MAC:
+ mac = ':'.join(re.findall('..', '{:02x}'.format(
+ src_mac))[:3])+":00:00:00"
+ else:
+ mac = ':'.join(re.findall(
+ '..', '{:02x}'.format(src_mac)))
+
+ if ip_type == self.EXACT_IP:
+ ip4[3] = random.randint(100, 200)
+ ip6[15] = random.randint(100, 200)
+ elif ip_type == self.SUBNET_IP:
+ ip4[2] = random.randint(100, 200)
+ ip4[3] = 0
+ ip6[7] = random.randint(100, 200)
+ ip6[15] = 0
+ ip_pack = b''
+ for j in range(0, len(ip)):
+ ip_pack += pack('<B', int(ip[j]))
+
+ rule = MacipRule(is_permit=self.PERMIT,
+ src_prefix=ip_network((ip_pack, ip_len)),
+ src_mac=MACAddress(mac).packed,
+ src_mac_mask=MACAddress(mask).packed)
+ rules.append(rule)
+ if ip_type == self.WILD_IP:
+ break
+
+ acls.append(rules)
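+            # 1099511627776 == 2**40: move to a different first MAC octet
+            # before building the next rule set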
+ src_mac += 1099511627776
+ return acls
+
+ def apply_macip_rules(self, acls):
+ macip_acls = []
+ for acl in acls:
+ macip_acl = VppMacipAcl(self, rules=acl)
+ macip_acl.add_vpp_config()
+ macip_acls.append(macip_acl)
+ return macip_acls
+
+ def verify_macip_acls(self, acl_count, rules_count, expected_count=2):
+ reply = self.macip_acl_dump_debug()
+ for acl in range(2, (acl_count+1) * 2):
+ self.assertEqual(reply[acl - 2].count, rules_count[acl//2-1])
+
+ self.vapi.macip_acl_interface_get()
+
+ self.vapi.macip_acl_interface_add_del(sw_if_index=0, acl_index=0)
+ self.vapi.macip_acl_interface_add_del(sw_if_index=1, acl_index=1)
+
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, expected_count)
+
+ def create_stream(self, mac_type, ip_type, packet_count,
+ src_if, dst_if, traffic, is_ip6, tags=PERMIT_TAGS):
+ # exact MAC and exact IP
+ # exact MAC and subnet of IPs
+ # exact MAC and wildcard IP
+ # wildcard MAC and exact IP
+ # wildcard MAC and subnet of IPs
+ # wildcard MAC and wildcard IP
+ # OUI restricted MAC and exact IP
+ # OUI restricted MAC and subnet of IPs
+ # OUI restricted MAC and wildcard IP
+
+ packets = []
+ macip_rules = []
+ acl_rules = []
+ ip_permit = ""
+ mac_permit = ""
+ dst_mac = ""
+ mac_rule = "00:00:00:00:00:00"
+ mac_mask = "00:00:00:00:00:00"
+ for p in range(0, packet_count):
+ remote_dst_index = p % len(dst_if.remote_hosts)
+ remote_dst_host = dst_if.remote_hosts[remote_dst_index]
+
+ dst_port = 1234 + p
+ src_port = 4321 + p
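+            # every third packet is expected to be permitted, the rest are
+            # denied, alternating between a mismatched MAC and a mismatched
+            # IP address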
+ is_permit = self.PERMIT if p % 3 == 0 else self.DENY
+ denyMAC = True if not is_permit and p % 3 == 1 else False
+ denyIP = True if not is_permit and p % 3 == 2 else False
+ if not is_permit and ip_type == self.WILD_IP:
+ denyMAC = True
+ if not is_permit and mac_type == self.WILD_MAC:
+ denyIP = True
+
+ if traffic == self.BRIDGED:
+ if is_permit:
+ src_mac = remote_dst_host._mac
+ dst_mac = 'de:ad:00:00:00:00'
+ src_ip4 = remote_dst_host.ip4
+ dst_ip4 = src_if.remote_ip4
+ src_ip6 = remote_dst_host.ip6
+ dst_ip6 = src_if.remote_ip6
+ ip_permit = src_ip6 if is_ip6 else src_ip4
+ mac_permit = src_mac
+ if denyMAC:
+ mac = src_mac.split(':')
+ mac[0] = format(int(mac[0], 16)+1, "02x")
+ src_mac = ":".join(mac)
+ if is_ip6:
+ src_ip6 = ip_permit
+ else:
+ src_ip4 = ip_permit
+ if denyIP:
+ if ip_type != self.WILD_IP:
+ src_mac = mac_permit
+ src_ip4 = remote_dst_host.ip4
+ dst_ip4 = src_if.remote_ip4
+ src_ip6 = remote_dst_host.ip6
+ dst_ip6 = src_if.remote_ip6
+ else:
+ if is_permit:
+ src_mac = remote_dst_host._mac
+ dst_mac = src_if.local_mac
+ src_ip4 = src_if.remote_ip4
+ dst_ip4 = remote_dst_host.ip4
+ src_ip6 = src_if.remote_ip6
+ dst_ip6 = remote_dst_host.ip6
+ ip_permit = src_ip6 if is_ip6 else src_ip4
+ mac_permit = src_mac
+ if denyMAC:
+ mac = src_mac.split(':')
+ mac[0] = format(int(mac[0], 16) + 1, "02x")
+ src_mac = ":".join(mac)
+ if is_ip6:
+ src_ip6 = ip_permit
+ else:
+ src_ip4 = ip_permit
+ if denyIP:
+ src_mac = remote_dst_host._mac
+ if ip_type != self.WILD_IP:
+ src_mac = mac_permit
+ src_ip4 = remote_dst_host.ip4
+ dst_ip4 = src_if.remote_ip4
+ src_ip6 = remote_dst_host.ip6
+ dst_ip6 = src_if.remote_ip6
+
+ if is_permit:
+ info = self.create_packet_info(src_if, dst_if)
+ payload = self.info_to_payload(info)
+ else:
+ payload = "to be blocked"
+
+ if mac_type == self.WILD_MAC:
+ mac = src_mac.split(':')
+ for i in range(1, 5):
+ mac[i] = format(random.randint(0, 255), "02x")
+ src_mac = ":".join(mac)
+
+ # create packet
+ packet = Ether(src=src_mac, dst=dst_mac)
+ ip_rule = src_ip6 if is_ip6 else src_ip4
+ if is_ip6:
+ if ip_type != self.EXACT_IP:
+ sub_ip = list(unpack('<16B', inet_pton(AF_INET6, ip_rule)))
+ if ip_type == self.WILD_IP:
+ sub_ip[0] = random.randint(240, 254)
+ sub_ip[1] = random.randint(230, 239)
+ sub_ip[14] = random.randint(100, 199)
+ sub_ip[15] = random.randint(200, 255)
+ elif ip_type == self.SUBNET_IP:
+ if denyIP:
+ sub_ip[2] = int(sub_ip[2]) + 1
+ sub_ip[14] = random.randint(100, 199)
+ sub_ip[15] = random.randint(200, 255)
+ packed_src_ip6 = b''.join(
+ [scapy.compat.chb(x) for x in sub_ip])
+ src_ip6 = inet_ntop(AF_INET6, packed_src_ip6)
+ packet /= IPv6(src=src_ip6, dst=dst_ip6)
+ else:
+ if ip_type != self.EXACT_IP:
+ sub_ip = ip_rule.split('.')
+ if ip_type == self.WILD_IP:
+ sub_ip[0] = random.randint(1, 49)
+ sub_ip[1] = random.randint(50, 99)
+ sub_ip[2] = random.randint(100, 199)
+ sub_ip[3] = random.randint(200, 255)
+ elif ip_type == self.SUBNET_IP:
+ if denyIP:
+ sub_ip[1] = int(sub_ip[1])+1
+ sub_ip[2] = random.randint(100, 199)
+ sub_ip[3] = random.randint(200, 255)
+ src_ip4 = '.'.join(['{!s}'.format(x) for x in sub_ip])
+ packet /= IP(src=src_ip4, dst=dst_ip4, frag=0, flags=0)
+
+ packet /= UDP(sport=src_port, dport=dst_port)/Raw(payload)
+
+ packet[Raw].load += b" mac:%s" % src_mac.encode('utf-8')
+
+ size = self.pg_if_packet_sizes[p % len(self.pg_if_packet_sizes)]
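+            # leave room for the VLAN tag(s) added on subinterfaces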
+ if isinstance(src_if, VppSubInterface):
+ size = size + 4
+ if isinstance(src_if, VppDot1QSubint):
+ if src_if is self.subifs[0]:
+ if tags == self.PERMIT_TAGS:
+ packet = src_if.add_dot1q_layer(packet, 10)
+ else:
+ packet = src_if.add_dot1q_layer(packet, 11)
+ else:
+ if tags == self.PERMIT_TAGS:
+ packet = src_if.add_dot1q_layer(packet, 30)
+ else:
+ packet = src_if.add_dot1q_layer(packet, 33)
+ elif isinstance(src_if, VppDot1ADSubint):
+ if src_if is self.subifs[1]:
+ if tags == self.PERMIT_TAGS:
+ packet = src_if.add_dot1ad_layer(packet, 300, 400)
+ else:
+ packet = src_if.add_dot1ad_layer(packet, 333, 444)
+ else:
+ if tags == self.PERMIT_TAGS:
+ packet = src_if.add_dot1ad_layer(packet, 600, 700)
+ else:
+ packet = src_if.add_dot1ad_layer(packet, 666, 777)
+ self.extend_packet(packet, size)
+ packets.append(packet)
+
+ # create suitable MACIP rule
+ if mac_type == self.EXACT_MAC:
+ mac_rule = src_mac
+ mac_mask = "ff:ff:ff:ff:ff:ff"
+ elif mac_type == self.WILD_MAC:
+ mac_rule = "00:00:00:00:00:00"
+ mac_mask = "00:00:00:00:00:00"
+ elif mac_type == self.OUI_MAC:
+ mac = src_mac.split(':')
+ mac[3] = mac[4] = mac[5] = '00'
+ mac_rule = ":".join(mac)
+ mac_mask = "ff:ff:ff:00:00:00"
+
+ if is_ip6:
+ if ip_type == self.WILD_IP:
+ ip = "0::0"
+ else:
+ ip = src_ip6
+ if ip_type == self.SUBNET_IP:
+ sub_ip = list(unpack('<16B', inet_pton(AF_INET6, ip)))
+ for i in range(8, 16):
+ sub_ip[i] = 0
+ packed_ip = b''.join(
+ [scapy.compat.chb(x) for x in sub_ip])
+ ip = inet_ntop(AF_INET6, packed_ip)
+ else:
+ if ip_type == self.WILD_IP:
+ ip = "0.0.0.0"
+ else:
+ ip = src_ip4
+ if ip_type == self.SUBNET_IP:
+ sub_ip = ip.split('.')
+ sub_ip[2] = sub_ip[3] = '0'
+ ip = ".".join(sub_ip)
+
+ prefix_len = 128 if is_ip6 else 32
+ if ip_type == self.WILD_IP:
+ prefix_len = 0
+ elif ip_type == self.SUBNET_IP:
+ prefix_len = 64 if is_ip6 else 16
+ ip_rule = inet_pton(AF_INET6 if is_ip6 else AF_INET, ip)
+
+ # create suitable ACL rule
+ if is_permit:
+ rule_l4_sport = packet[UDP].sport
+ rule_l4_dport = packet[UDP].dport
+ rule_family = AF_INET6 if packet.haslayer(IPv6) else AF_INET
+ rule_prefix_len = 128 if packet.haslayer(IPv6) else 32
+ rule_l3_layer = IPv6 if packet.haslayer(IPv6) else IP
+ if packet.haslayer(IPv6):
+ rule_l4_proto = packet[UDP].overload_fields[IPv6]['nh']
+ else:
+ rule_l4_proto = packet[IP].proto
+
+ src_network = ip_network(
+ (packet[rule_l3_layer].src, rule_prefix_len))
+ dst_network = ip_network(
+ (packet[rule_l3_layer].dst, rule_prefix_len))
+ acl_rule = AclRule(is_permit=is_permit, proto=rule_l4_proto,
+ src_prefix=src_network,
+ dst_prefix=dst_network,
+ sport_from=rule_l4_sport,
+ sport_to=rule_l4_sport,
+ dport_from=rule_l4_dport,
+ dport_to=rule_l4_dport)
+ acl_rules.append(acl_rule)
+
+ if mac_type == self.WILD_MAC and ip_type == self.WILD_IP and p > 0:
+ continue
+
+ if is_permit:
+ macip_rule = MacipRule(
+ is_permit=is_permit,
+ src_prefix=ip_network(
+ (ip_rule, prefix_len)),
+ src_mac=MACAddress(mac_rule).packed,
+ src_mac_mask=MACAddress(mac_mask).packed)
+ macip_rules.append(macip_rule)
+
+ # deny all other packets
+ if not (mac_type == self.WILD_MAC and ip_type == self.WILD_IP):
+ network = IPv6Network((0, 0)) if is_ip6 else IPv4Network((0, 0))
+ macip_rule = MacipRule(
+ is_permit=0,
+ src_prefix=network,
+ src_mac=MACAddress("00:00:00:00:00:00").packed,
+ src_mac_mask=MACAddress("00:00:00:00:00:00").packed)
+ macip_rules.append(macip_rule)
+
+ network = IPv6Network((0, 0)) if is_ip6 else IPv4Network((0, 0))
+ acl_rule = AclRule(is_permit=0, src_prefix=network, dst_prefix=network,
+ sport_from=0, sport_to=0, dport_from=0, dport_to=0)
+ acl_rules.append(acl_rule)
+ return {'stream': packets,
+ 'macip_rules': macip_rules,
+ 'acl_rules': acl_rules}
+
+ def verify_capture(self, stream, capture, is_ip6):
+ """
+ :param stream:
+ :param capture:
+ :param is_ip6:
+ :return:
+ """
+ # p_l3 = IPv6 if is_ip6 else IP
+ # if self.DEBUG:
+ # for p in stream:
+ # print(p[Ether].src, p[Ether].dst, p[p_l3].src, p[p_l3].dst)
+ #
+ # acls = self.macip_acl_dump_debug()
+
+ # TODO : verify
+ # for acl in acls:
+ # for r in acl.r:
+ # print(binascii.hexlify(r.src_mac), \
+ # binascii.hexlify(r.src_mac_mask),\
+ # unpack('<16B', r.src_ip_addr), \
+ # r.src_ip_prefix_len)
+ #
+ # for p in capture:
+ # print(p[Ether].src, p[Ether].dst, p[p_l3].src, p[p_l3].dst
+ # data = p[Raw].load.split(':',1)[1])
+ # print(p[p_l3].src, data)
+
+ def run_traffic(self, mac_type, ip_type, traffic, is_ip6, packets,
+ do_not_expected_capture=False, tags=None,
+ apply_rules=True, isMACIP=True, permit_tags=PERMIT_TAGS,
+ try_replace=False):
+ self.reset_packet_infos()
+
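+        # select TX/RX interfaces: untagged bridged traffic goes pg0 -> pg3
+        # and untagged routed traffic pg3 -> pg0; tagged traffic enters on
+        # the matching subinterface and is received on pg0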
+ if tags is None:
+ tx_if = self.pg0 if traffic == self.BRIDGED else self.pg3
+ rx_if = self.pg3 if traffic == self.BRIDGED else self.pg0
+ src_if = self.pg3
+ dst_if = self.loop0
+ else:
+ if tags == self.DOT1Q:
+ if traffic == self.BRIDGED:
+ tx_if = self.subifs[0]
+ rx_if = self.pg0
+ src_if = self.subifs[0]
+ dst_if = self.loop0
+ else:
+ tx_if = self.subifs[2]
+ rx_if = self.pg0
+ src_if = self.subifs[2]
+ dst_if = self.loop0
+ elif tags == self.DOT1AD:
+ if traffic == self.BRIDGED:
+ tx_if = self.subifs[1]
+ rx_if = self.pg0
+ src_if = self.subifs[1]
+ dst_if = self.loop0
+ else:
+ tx_if = self.subifs[3]
+ rx_if = self.pg0
+ src_if = self.subifs[3]
+ dst_if = self.loop0
+ else:
+ return
+
+ test_dict = self.create_stream(mac_type, ip_type, packets,
+ src_if, dst_if,
+ traffic, is_ip6,
+ tags=permit_tags)
+
+ if apply_rules:
+ if isMACIP:
+ self.acl = VppMacipAcl(self, rules=test_dict['macip_rules'])
+ else:
+ self.acl = VppAcl(self, rules=test_dict['acl_rules'])
+ self.acl.add_vpp_config()
+
+ if isMACIP:
+ self.acl_if = VppMacipAclInterface(
+ self, sw_if_index=tx_if.sw_if_index, acls=[self.acl])
+ self.acl_if.add_vpp_config()
+
+ dump = self.acl_if.dump()
+ self.assertTrue(dump)
+ self.assertEqual(dump[0].acls[0], self.acl.acl_index)
+ else:
+ self.acl_if = VppAclInterface(
+ self, sw_if_index=tx_if.sw_if_index, n_input=1,
+ acls=[self.acl])
+ self.acl_if.add_vpp_config()
+ else:
+ if hasattr(self, "acl_if"):
+ self.acl_if.remove_vpp_config()
+ if try_replace and hasattr(self, "acl"):
+ if isMACIP:
+ self.acl.modify_vpp_config(test_dict['macip_rules'])
+ else:
+ self.acl.modify_vpp_config(test_dict['acl_rules'])
+
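+        # packets for subinterfaces have to be injected on the parent
+        # interface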
+ if not isinstance(src_if, VppSubInterface):
+ tx_if.add_stream(test_dict['stream'])
+ else:
+ tx_if.parent.add_stream(test_dict['stream'])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ if do_not_expected_capture:
+ rx_if.get_capture(0)
+ else:
+ if traffic == self.BRIDGED and mac_type == self.WILD_MAC and \
+ ip_type == self.WILD_IP:
+ capture = rx_if.get_capture(packets)
+ else:
+ capture = rx_if.get_capture(
+ self.get_packet_count_for_if_idx(dst_if.sw_if_index))
+ self.verify_capture(test_dict['stream'], capture, is_ip6)
+ if not isMACIP:
+ if hasattr(self, "acl_if"):
+ self.acl_if.remove_vpp_config()
+ if hasattr(self, "acl"):
+ self.acl.remove_vpp_config()
+
+ def run_test_acls(self, mac_type, ip_type, acl_count,
+ rules_count, traffic=None, ip=None):
+ self.apply_macip_rules(self.create_rules(mac_type, ip_type, acl_count,
+ rules_count))
+ self.verify_macip_acls(acl_count, rules_count)
+
+ if traffic is not None:
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, traffic, ip, 9)
+
+
+class TestMACIP_IP4(MethodHolder):
+ """MACIP with IP4 traffic"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMACIP_IP4, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMACIP_IP4, cls).tearDownClass()
+
+ def test_acl_bridged_ip4_exactMAC_exactIP(self):
+ """ IP4 MACIP exactMAC|exactIP ACL bridged traffic
+ """
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_exactMAC_subnetIP(self):
+ """ IP4 MACIP exactMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_exactMAC_wildIP(self):
+ """ IP4 MACIP exactMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_ouiMAC_exactIP(self):
+ """ IP4 MACIP ouiMAC|exactIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP4, 3)
+
+ def test_acl_bridged_ip4_ouiMAC_subnetIP(self):
+ """ IP4 MACIP ouiMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_ouiMAC_wildIP(self):
+ """ IP4 MACIP ouiMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+    def test_acl_bridged_ip4_wildMAC_exactIP(self):
+ """ IP4 MACIP wildcardMAC|exactIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_wildMAC_subnetIP(self):
+ """ IP4 MACIP wildcardMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_bridged_ip4_wildMAC_wildIP(self):
+ """ IP4 MACIP wildcardMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_exactMAC_exactIP(self):
+ """ IP4 MACIP exactMAC|exactIP ACL routed traffic
+ """
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_exactMAC_subnetIP(self):
+ """ IP4 MACIP exactMAC|subnetIP ACL routed traffic
+ """
+ self.run_traffic(self.EXACT_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_exactMAC_wildIP(self):
+ """ IP4 MACIP exactMAC|wildIP ACL routed traffic
+ """
+ self.run_traffic(self.EXACT_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_ouiMAC_exactIP(self):
+ """ IP4 MACIP ouiMAC|exactIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_ouiMAC_subnetIP(self):
+ """ IP4 MACIP ouiMAC|subnetIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_ouiMAC_wildIP(self):
+ """ IP4 MACIP ouiMAC|wildIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_wildMAC_exactIP(self):
+ """ IP4 MACIP wildcardMAC|exactIP ACL routed traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_wildMAC_subnetIP(self):
+ """ IP4 MACIP wildcardMAC|subnetIP ACL routed traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_routed_ip4_wildMAC_wildIP(self):
+ """ IP4 MACIP wildcardMAC|wildIP ACL
+ """
+
+ self.run_traffic(self.WILD_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP4, 9)
+
+ def test_acl_replace_traffic_ip4(self):
+ """ MACIP replace ACL with IP4 traffic
+ """
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP4, 9, try_replace=True)
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP4, 9, try_replace=True)
+
+
+class TestMACIP_IP6(MethodHolder):
+ """MACIP with IP6 traffic"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMACIP_IP6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMACIP_IP6, cls).tearDownClass()
+
+ def test_acl_bridged_ip6_exactMAC_exactIP(self):
+ """ IP6 MACIP exactMAC|exactIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_exactMAC_subnetIP(self):
+ """ IP6 MACIP exactMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_exactMAC_wildIP(self):
+ """ IP6 MACIP exactMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_ouiMAC_exactIP(self):
+ """ IP6 MACIP oui_MAC|exactIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_ouiMAC_subnetIP(self):
+ """ IP6 MACIP ouiMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_ouiMAC_wildIP(self):
+ """ IP6 MACIP ouiMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_wildMAC_exactIP(self):
+ """ IP6 MACIP wildcardMAC|exactIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_wildMAC_subnetIP(self):
+ """ IP6 MACIP wildcardMAC|subnetIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_bridged_ip6_wildMAC_wildIP(self):
+ """ IP6 MACIP wildcardMAC|wildIP ACL bridged traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.WILD_IP,
+ self.BRIDGED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_exactMAC_exactIP(self):
+ """ IP6 MACIP exactMAC|exactIP ACL routed traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_exactMAC_subnetIP(self):
+ """ IP6 MACIP exactMAC|subnetIP ACL routed traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_exactMAC_wildIP(self):
+ """ IP6 MACIP exactMAC|wildIP ACL routed traffic
+ """
+
+ self.run_traffic(self.EXACT_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_ouiMAC_exactIP(self):
+ """ IP6 MACIP ouiMAC|exactIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_ouiMAC_subnetIP(self):
+ """ IP6 MACIP ouiMAC|subnetIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_ouiMAC_wildIP(self):
+ """ IP6 MACIP ouiMAC|wildIP ACL routed traffic
+ """
+
+ self.run_traffic(self.OUI_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_wildMAC_exactIP(self):
+ """ IP6 MACIP wildcardMAC|exactIP ACL routed traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.EXACT_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_wildMAC_subnetIP(self):
+ """ IP6 MACIP wildcardMAC|subnetIP ACL routed traffic
+ """
+
+ self.run_traffic(self.WILD_MAC, self.SUBNET_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_routed_ip6_wildMAC_wildIP(self):
+ """ IP6 MACIP wildcardMAC|wildIP ACL
+ """
+
+ self.run_traffic(self.WILD_MAC, self.WILD_IP,
+ self.ROUTED, self.IS_IP6, 9)
+
+ def test_acl_replace_traffic_ip6(self):
+ """ MACIP replace ACL with IP6 traffic
+ """
+ self.run_traffic(self.OUI_MAC, self.SUBNET_IP,
+ self.BRIDGED, self.IS_IP6, 9, try_replace=True)
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP,
+ self.BRIDGED, self.IS_IP6, 9, try_replace=True)
+
+
+class TestMACIP(MethodHolder):
+ """MACIP Tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMACIP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMACIP, cls).tearDownClass()
+
+ def test_acl_1_2(self):
+ """ MACIP ACL with 2 entries
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.WILD_IP, 1, [2])
+
+ def test_acl_1_5(self):
+ """ MACIP ACL with 5 entries
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.SUBNET_IP, 1, [5])
+
+ def test_acl_1_10(self):
+ """ MACIP ACL with 10 entries
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.EXACT_IP, 1, [10])
+
+ def test_acl_1_20(self):
+ """ MACIP ACL with 20 entries
+ """
+
+ self.run_test_acls(self.OUI_MAC, self.WILD_IP, 1, [20])
+
+ def test_acl_1_50(self):
+ """ MACIP ACL with 50 entries
+ """
+
+ self.run_test_acls(self.OUI_MAC, self.SUBNET_IP, 1, [50])
+
+ def test_acl_1_100(self):
+ """ MACIP ACL with 100 entries
+ """
+
+ self.run_test_acls(self.OUI_MAC, self.EXACT_IP, 1, [100])
+
+ def test_acl_2_X(self):
+ """ MACIP 2 ACLs each with 100+ entries
+ """
+
+ self.run_test_acls(self.OUI_MAC, self.SUBNET_IP, 2, [100, 200])
+
+ def test_acl_10_X(self):
+ """ MACIP 10 ACLs each with 100+ entries
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.EXACT_IP, 10,
+ [100, 120, 140, 160, 180, 200, 210, 220, 230, 240])
+
+ def test_acl_10_X_traffic_ip4(self):
+ """ MACIP 10 ACLs each with 100+ entries with IP4 traffic
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.EXACT_IP, 10,
+ [100, 120, 140, 160, 180, 200, 210, 220, 230, 240],
+ self.BRIDGED, self.IS_IP4)
+
+ def test_acl_10_X_traffic_ip6(self):
+ """ MACIP 10 ACLs each with 100+ entries with IP6 traffic
+ """
+
+ self.run_test_acls(self.EXACT_MAC, self.EXACT_IP, 10,
+ [100, 120, 140, 160, 180, 200, 210, 220, 230, 240],
+ self.BRIDGED, self.IS_IP6)
+
+ def test_acl_replace(self):
+ """ MACIP replace ACL
+ """
+
+ r1 = self.create_rules(acl_count=3, rules_count=[2, 2, 2])
+ r2 = self.create_rules(mac_type=self.OUI_MAC, ip_type=self.SUBNET_IP)
+ macip_acls = self.apply_macip_rules(r1)
+
+ acls_before = self.macip_acl_dump_debug()
+
+ # replace acls #2, #3 with new
+ macip_acls[2].modify_vpp_config(r2[0])
+ macip_acls[3].modify_vpp_config(r2[1])
+
+ acls_after = self.macip_acl_dump_debug()
+
+ # verify changes
+ self.assertEqual(len(acls_before), len(acls_after))
+ for acl1, acl2 in zip(
+ acls_before[:2]+acls_before[4:],
+ acls_after[:2]+acls_after[4:]):
+ self.assertEqual(len(acl1), len(acl2))
+
+ self.assertEqual(len(acl1.r), len(acl2.r))
+ for r1, r2 in zip(acl1.r, acl2.r):
+ self.assertEqual(len(acl1.r), len(acl2.r))
+ self.assertEqual(acl1.r, acl2.r)
+ for acl1, acl2 in zip(
+ acls_before[2:4],
+ acls_after[2:4]):
+ self.assertEqual(len(acl1), len(acl2))
+
+ self.assertNotEqual(len(acl1.r), len(acl2.r))
+ for r1, r2 in zip(acl1.r, acl2.r):
+ self.assertNotEqual(len(acl1.r), len(acl2.r))
+ self.assertNotEqual(acl1.r, acl2.r)
+
+ def test_delete_intf(self):
+ """ MACIP ACL delete intf with acl
+ """
+
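+        # +1 accounts for the local0 interface, which is not part of
+        # self.interfaces but shows up in the MACIP interface dump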
+ intf_count = len(self.interfaces)+1
+ intf = []
+        macip_acls = self.apply_macip_rules(
+ self.create_rules(acl_count=3, rules_count=[3, 5, 4]))
+
+ intf.append(VppLoInterface(self))
+ intf.append(VppLoInterface(self))
+
+ sw_if_index0 = intf[0].sw_if_index
+ macip_acl_if0 = VppMacipAclInterface(
+            self, sw_if_index=sw_if_index0, acls=[macip_acls[1]])
+ macip_acl_if0.add_vpp_config()
+
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, intf_count+1)
+ self.assertEqual(reply.acls[sw_if_index0], 1)
+
+ sw_if_index1 = intf[1].sw_if_index
+ macip_acl_if1 = VppMacipAclInterface(
+            self, sw_if_index=sw_if_index1, acls=[macip_acls[0]])
+ macip_acl_if1.add_vpp_config()
+
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, intf_count+2)
+ self.assertEqual(reply.acls[sw_if_index1], 0)
+
+ intf[0].remove_vpp_config()
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, intf_count+2)
+ self.assertEqual(reply.acls[sw_if_index0], 4294967295)
+ self.assertEqual(reply.acls[sw_if_index1], 0)
+
+ intf.append(VppLoInterface(self))
+ intf.append(VppLoInterface(self))
+ sw_if_index2 = intf[2].sw_if_index
+ sw_if_index3 = intf[3].sw_if_index
+ macip_acl_if2 = VppMacipAclInterface(
+            self, sw_if_index=sw_if_index2, acls=[macip_acls[1]])
+ macip_acl_if2.add_vpp_config()
+ macip_acl_if3 = VppMacipAclInterface(
+            self, sw_if_index=sw_if_index3, acls=[macip_acls[1]])
+ macip_acl_if3.add_vpp_config()
+
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, intf_count+3)
+ self.assertEqual(reply.acls[sw_if_index1], 0)
+ self.assertEqual(reply.acls[sw_if_index2], 1)
+ self.assertEqual(reply.acls[sw_if_index3], 1)
+ self.logger.info("MACIP ACL on multiple interfaces:")
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip acl"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip acl index 1234"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip acl index 1"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip acl index 0"))
+ self.logger.info(self.vapi.ppcli("sh acl-plugin macip interface"))
+
+ intf[2].remove_vpp_config()
+ intf[1].remove_vpp_config()
+
+ reply = self.vapi.macip_acl_interface_get()
+ self.assertEqual(reply.count, intf_count+3)
+ self.assertEqual(reply.acls[sw_if_index0], 4294967295)
+ self.assertEqual(reply.acls[sw_if_index1], 4294967295)
+ self.assertEqual(reply.acls[sw_if_index2], 4294967295)
+ self.assertEqual(reply.acls[sw_if_index3], 1)
+
+ intf[3].remove_vpp_config()
+ reply = self.vapi.macip_acl_interface_get()
+
+ self.assertEqual(len([x for x in reply.acls if x != 4294967295]), 0)
+
+
+class TestACL_dot1q_bridged(MethodHolder):
+ """ACL on dot1q bridged subinterfaces Tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestACL_dot1q_bridged, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestACL_dot1q_bridged, cls).tearDownClass()
+
+ def test_acl_bridged_ip4_subif_dot1q(self):
+ """ IP4 ACL SubIf Dot1Q bridged traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.BRIDGED,
+ self.IS_IP4, 9, tags=self.DOT1Q, isMACIP=False)
+
+ def test_acl_bridged_ip6_subif_dot1q(self):
+ """ IP6 ACL SubIf Dot1Q bridged traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.BRIDGED,
+ self.IS_IP6, 9, tags=self.DOT1Q, isMACIP=False)
+
+
+class TestACL_dot1ad_bridged(MethodHolder):
+ """ACL on dot1ad bridged subinterfaces Tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestACL_dot1ad_bridged, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestACL_dot1ad_bridged, cls).tearDownClass()
+
+ def test_acl_bridged_ip4_subif_dot1ad(self):
+ """ IP4 ACL SubIf Dot1AD bridged traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.BRIDGED,
+ self.IS_IP4, 9, tags=self.DOT1AD, isMACIP=False)
+
+ def test_acl_bridged_ip6_subif_dot1ad(self):
+ """ IP6 ACL SubIf Dot1AD bridged traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.BRIDGED,
+ self.IS_IP6, 9, tags=self.DOT1AD, isMACIP=False)
+
+
+class TestACL_dot1q_routed(MethodHolder):
+ """ACL on dot1q routed subinterfaces Tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestACL_dot1q_routed, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestACL_dot1q_routed, cls).tearDownClass()
+
+ def test_acl_routed_ip4_subif_dot1q(self):
+ """ IP4 ACL SubIf Dot1Q routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP4, 9, tags=self.DOT1Q, isMACIP=False)
+
+ def test_acl_routed_ip6_subif_dot1q(self):
+ """ IP6 ACL SubIf Dot1Q routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP6, 9, tags=self.DOT1Q, isMACIP=False)
+
+ def test_acl_routed_ip4_subif_dot1q_deny_by_tags(self):
+ """ IP4 ACL SubIf wrong tags Dot1Q routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP4, 9, True, tags=self.DOT1Q, isMACIP=False,
+ permit_tags=self.DENY_TAGS)
+
+ def test_acl_routed_ip6_subif_dot1q_deny_by_tags(self):
+ """ IP6 ACL SubIf wrong tags Dot1Q routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP6, 9, True, tags=self.DOT1Q, isMACIP=False,
+ permit_tags=self.DENY_TAGS)
+
+
+class TestACL_dot1ad_routed(MethodHolder):
+ """ACL on dot1ad routed subinterfaces Tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestACL_dot1ad_routed, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestACL_dot1ad_routed, cls).tearDownClass()
+
+ def test_acl_routed_ip6_subif_dot1ad(self):
+ """ IP6 ACL SubIf Dot1AD routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP6, 9, tags=self.DOT1AD, isMACIP=False)
+
+ def test_acl_routed_ip4_subif_dot1ad(self):
+ """ IP4 ACL SubIf Dot1AD routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP4, 9, tags=self.DOT1AD, isMACIP=False)
+
+ def test_acl_routed_ip6_subif_dot1ad_deny_by_tags(self):
+ """ IP6 ACL SubIf wrong tags Dot1AD routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP6, 9, True, tags=self.DOT1AD, isMACIP=False,
+ permit_tags=self.DENY_TAGS)
+
+ def test_acl_routed_ip4_subif_dot1ad_deny_by_tags(self):
+ """ IP4 ACL SubIf wrong tags Dot1AD routed traffic"""
+ self.run_traffic(self.EXACT_MAC, self.EXACT_IP, self.ROUTED,
+ self.IS_IP4, 9, True, tags=self.DOT1AD, isMACIP=False,
+ permit_tags=self.DENY_TAGS)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_adl.py b/test/test_adl.py
new file mode 100644
index 00000000000..4a996fc5c90
--- /dev/null
+++ b/test/test_adl.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_gcov_tests
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+
+
+class TestAdl(VppTestCase):
+ """ Allow/Deny Plugin Unit Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestAdl, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestAdl, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestAdl, self).setUp()
+
+ def tearDown(self):
+ super(TestAdl, self).tearDown()
+
+ def test_adl1_unittest(self):
+ """ Plugin API Test """
+ cmds = ["loop create\n",
+ "set int ip address loop0 192.168.1.1/24\n",
+ "set int ip6 table loop0 0\n",
+ "set int ip address loop0 2001:db01::1/64\n",
+ "set int state loop0 up\n",
+ "packet-generator new {\n"
+ " name ip4\n"
+ " limit 100\n"
+ " rate 0\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node adl-input\n"
+ " data { IP4: 1.2.40 -> 3cfd.fed0.b6c8\n"
+ " UDP: 192.168.1.2-192.168.1.10 -> 192.168.2.1\n"
+ " UDP: 1234 -> 2345\n"
+ " incrementing 114\n"
+ " }\n"
+ " }\n",
+ "packet-generator new {\n"
+ " name ip6-allow\n"
+ " limit 50\n"
+ " rate 0\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node adl-input\n"
+ " data { IP6: 1.2.40 -> 3cfd.fed0.b6c8\n"
+ " UDP: 2001:db01::2 -> 2001:db01::1\n"
+ " UDP: 1234 -> 2345\n"
+ " incrementing 80\n"
+ " }\n"
+ " }\n",
+ "packet-generator new {\n"
+ " name ip6-drop\n"
+ " limit 50\n"
+ " rate 0\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node adl-input\n"
+ " data { IP6: 1.2.40 -> 3cfd.fed0.b6c8\n"
+ " UDP: 2001:db01::3 -> 2001:db01::1\n"
+ " UDP: 1234 -> 2345\n"
+ " incrementing 80\n"
+ " }\n"
+ " }\n",
+ "ip table 1\n",
+ "ip route add 192.168.2.1/32 via drop\n",
+ "ip route add table 1 192.168.1.2/32 via local\n",
+ "ip6 table 1\n",
+ "ip route add 2001:db01::1/128 via drop\n",
+ "ip route add table 1 2001:db01::2/128 via local\n",
+ "bin adl_interface_enable_disable loop0\n",
+ "bin adl_allowlist_enable_disable loop0 fib-id 1 ip4 ip6\n",
+ "pa en\n"]
+
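+        # run each CLI command; failures are only logged here, the error
+        # counters below verify the actual packet processing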
+ for cmd in cmds:
+ r = self.vapi.cli_return_response(cmd)
+ if r.retval != 0:
+ if hasattr(r, 'reply'):
+ self.logger.info(cmd + " FAIL reply " + r.reply)
+ else:
+ self.logger.info(cmd + " FAIL retval " + str(r.retval))
+
+ total_pkts = self.statistics.get_err_counter(
+ "/err/adl-input/Allow/Deny packets processed")
+
+ self.assertEqual(total_pkts, 200)
+
+ ip4_allow = self.statistics.get_err_counter(
+ "/err/ip4-adl-allowlist/ip4 allowlist allowed")
+ self.assertEqual(ip4_allow, 12)
+ ip6_allow = self.statistics.get_err_counter(
+ "/err/ip6-adl-allowlist/ip6 allowlist allowed")
+ self.assertEqual(ip6_allow, 50)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_arping.py b/test/test_arping.py
new file mode 100644
index 00000000000..bd8b6250a54
--- /dev/null
+++ b/test/test_arping.py
@@ -0,0 +1,251 @@
+""" TestArping is a subclass of VppTestCase.
+
+Basic sanity-check tests for arping.
+
+"""
+
+import unittest
+
+from scapy.layers.l2 import ARP
+from scapy.layers.inet6 import ICMPv6ND_NS, ICMPv6ND_NA, IPv6
+
+from framework import VppTestCase, VppTestRunner
+
+
+class TestArping(VppTestCase):
+ """ Arping Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestArping, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+ except Exception:
+ super(TestArping, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestArping, cls).tearDownClass()
+
+ def tearDown(self):
+ super(TestArping, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show hardware"))
+
+ def verify_arping_request(self, p, src, dst):
+ arp = p[ARP]
+ self.assertEqual(arp.hwtype, 0x0001)
+ self.assertEqual(arp.ptype, 0x0800)
+ self.assertEqual(arp.hwlen, 6)
+ self.assertEqual(arp.op, 1)
+ self.assertEqual(arp.psrc, src)
+ self.assertEqual(arp.pdst, dst)
+
+ def verify_arping_ip6_ns(self, p, src, dst):
+ icmpv6 = p[ICMPv6ND_NS]
+ self.assertEqual(icmpv6.type, 135)
+ self.assertEqual(icmpv6.tgt, dst)
+ ipv6 = p[IPv6]
+ self.assertEqual(src, ipv6.src)
+
+ def verify_arping_ip6_na(self, p, src, dst):
+ icmpv6 = p[ICMPv6ND_NA]
+ self.assertEqual(icmpv6.type, 136)
+ self.assertEqual(icmpv6.tgt, dst)
+ ipv6 = p[IPv6]
+ self.assertEqual(src, ipv6.src)
+
+ def test_arping_ip4_arp_request_cli(self):
+ """ arping IP4 arp request CLI test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ remote_ip4 = self.pg1.remote_ip4
+
+ ping_cmd = "arping " + remote_ip4 + "pg1 repeat 5 interval 0.1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ ping_cmd = "arping " + remote_ip4 + "pg1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_request(p, self.pg1.local_ip4,
+ self.pg1.remote_ip4)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip4_garp_cli(self):
+ """ arping ip4 gratuitous arp CLI test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ ping_cmd = ("arping gratuitous" + self.pg1.local_ip4 +
+ "pg1 repeat 5 interval 0.1")
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ ping_cmd = "arping gratuitous" + self.pg1.local_ip4 + "pg1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_request(p, self.pg1.local_ip4,
+ self.pg1.local_ip4)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip4_arp_request_api(self):
+ """ arping ip4 arp request API test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ remote_ip4 = self.pg1.remote_ip4
+
+ ret = self.vapi.arping(address=remote_ip4,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=0, repeat=5, interval=0.1)
+ self.logger.info(ret)
+
+ ret = self.vapi.arping(address=remote_ip4,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=0)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_request(p, self.pg1.local_ip4,
+ self.pg1.remote_ip4)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip4_garp_api(self):
+ """ arping ip4 gratuitous arp API test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ ret = self.vapi.arping(address=self.pg1.local_ip4,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=1, repeat=5, interval=0.1)
+ self.logger.info(ret)
+
+ ret = self.vapi.arping(address=self.pg1.local_ip4,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=1)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_request(p, self.pg1.local_ip4,
+ self.pg1.local_ip4)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip6_ns_cli(self):
+ """ arping IP6 neighbor solicitation CLI test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ remote_ip6 = self.pg1.remote_ip6
+
+ ping_cmd = "arping " + remote_ip6 + "pg1 repeat 5 interval 0.1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ ping_cmd = "arping " + remote_ip6 + "pg1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_ip6_ns(p, self.pg1.local_ip6,
+ self.pg1.remote_ip6)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip6_ns_api(self):
+ """ arping ip6 neighbor solicitation API test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ remote_ip6 = self.pg1.remote_ip6
+
+ ret = self.vapi.arping(address=remote_ip6,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=0, repeat=5, interval=0.1)
+ self.logger.info(ret)
+
+ ret = self.vapi.arping(address=remote_ip6,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=0)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_ip6_ns(p, self.pg1.local_ip6,
+ self.pg1.remote_ip6)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip6_na_cli(self):
+ """ arping ip6 neighbor advertisement CLI test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ ping_cmd = ("arping gratuitous" + self.pg1.local_ip6 +
+ "pg1 repeat 5 interval 0.1")
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ ping_cmd = "arping gratuitous" + self.pg1.local_ip6 + "pg1"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_ip6_na(p, self.pg1.local_ip6,
+ self.pg1.local_ip6)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_arping_ip6_na_api(self):
+ """ arping ip6 neighbor advertisement API test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ ret = self.vapi.arping(address=self.pg1.local_ip6,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=1, repeat=5, interval=0.1)
+ self.logger.info(ret)
+
+ ret = self.vapi.arping(address=self.pg1.local_ip6,
+ sw_if_index=self.pg1.sw_if_index,
+ is_garp=1)
+ self.logger.info(ret)
+
+ out = self.pg1.get_capture(6)
+ for p in out:
+ self.verify_arping_ip6_na(p, self.pg1.local_ip6,
+ self.pg1.local_ip6)
+ finally:
+ self.vapi.cli("show error")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_bfd.py b/test/test_bfd.py
new file mode 100644
index 00000000000..01b468c8e27
--- /dev/null
+++ b/test/test_bfd.py
@@ -0,0 +1,2763 @@
+#!/usr/bin/env python3
+""" BFD tests """
+
+from __future__ import division
+
+import binascii
+import hashlib
+import ipaddress
+import reprlib
+import time
+import unittest
+from random import randint, shuffle, getrandbits
+from socket import AF_INET, AF_INET6, inet_ntop
+from struct import pack, unpack
+
+import scapy.compat
+from scapy.layers.inet import UDP, IP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import Ether, GRE
+from scapy.packet import Raw
+
+from bfd import VppBFDAuthKey, BFD, BFDAuthType, VppBFDUDPSession, \
+ BFDDiagCode, BFDState, BFD_vpp_echo
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from framework import tag_run_solo
+from util import ppp
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_lo_interface import VppLoInterface
+from vpp_papi_provider import UnexpectedApiReturnValueError, \
+ CliFailedCommandError
+from vpp_pg_interface import CaptureTimeoutError, is_ipv6_misc
+from vpp_gre_interface import VppGreInterface
+from vpp_papi import VppEnum
+
+USEC_IN_SEC = 1000000
+
+
+class AuthKeyFactory(object):
+ """Factory class for creating auth keys with unique conf key ID"""
+
+ def __init__(self):
+ self._conf_key_ids = {}
+
+ def create_random_key(self, test, auth_type=BFDAuthType.keyed_sha1):
+ """ create a random key with unique conf key id """
+ conf_key_id = randint(0, 0xFFFFFFFF)
+ while conf_key_id in self._conf_key_ids:
+ conf_key_id = randint(0, 0xFFFFFFFF)
+ self._conf_key_ids[conf_key_id] = 1
+ key = scapy.compat.raw(
+ bytearray([randint(0, 255) for _ in range(randint(1, 20))]))
+ return VppBFDAuthKey(test=test, auth_type=auth_type,
+ conf_key_id=conf_key_id, key=key)
+
+
+class BFDAPITestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) - API"""
+
+ pg0 = None
+ pg1 = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDAPITestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces(range(2))
+ for i in cls.pg_interfaces:
+ i.config_ip4()
+ i.config_ip6()
+ i.resolve_arp()
+
+ except Exception:
+ super(BFDAPITestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDAPITestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDAPITestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+
+ def test_add_bfd(self):
+ """ create a BFD session """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+
+ def test_double_add(self):
+ """ create the same BFD session twice (negative case) """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+
+ with self.vapi.assert_negative_api_retval():
+ session.add_vpp_config()
+
+ session.remove_vpp_config()
+
+ def test_add_bfd6(self):
+ """ create IPv6 BFD session """
+ session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip6, af=AF_INET6)
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+
+ def test_mod_bfd(self):
+ """ modify BFD session parameters """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ desired_min_tx=50000,
+ required_min_rx=10000,
+ detect_mult=1)
+ session.add_vpp_config()
+ s = session.get_bfd_udp_session_dump_entry()
+ self.assert_equal(session.desired_min_tx,
+ s.desired_min_tx,
+ "desired min transmit interval")
+ self.assert_equal(session.required_min_rx,
+ s.required_min_rx,
+ "required min receive interval")
+ self.assert_equal(session.detect_mult, s.detect_mult, "detect mult")
+ session.modify_parameters(desired_min_tx=session.desired_min_tx * 2,
+ required_min_rx=session.required_min_rx * 2,
+ detect_mult=session.detect_mult * 2)
+ s = session.get_bfd_udp_session_dump_entry()
+ self.assert_equal(session.desired_min_tx,
+ s.desired_min_tx,
+ "desired min transmit interval")
+ self.assert_equal(session.required_min_rx,
+ s.required_min_rx,
+ "required min receive interval")
+ self.assert_equal(session.detect_mult, s.detect_mult, "detect mult")
+
+ def test_add_sha1_keys(self):
+ """ add SHA1 keys """
+ key_count = 10
+ keys = [self.factory.create_random_key(
+ self) for i in range(0, key_count)]
+ for key in keys:
+ self.assertFalse(key.query_vpp_config())
+ for key in keys:
+ key.add_vpp_config()
+ for key in keys:
+ self.assertTrue(key.query_vpp_config())
+ # remove randomly
+ indexes = list(range(key_count))
+ shuffle(indexes)
+ removed = []
+ for i in indexes:
+ key = keys[i]
+ key.remove_vpp_config()
+ removed.append(i)
+ for j in range(key_count):
+ key = keys[j]
+ if j in removed:
+ self.assertFalse(key.query_vpp_config())
+ else:
+ self.assertTrue(key.query_vpp_config())
+ # should be removed now
+ for key in keys:
+ self.assertFalse(key.query_vpp_config())
+ # add back and remove again
+ for key in keys:
+ key.add_vpp_config()
+ for key in keys:
+ self.assertTrue(key.query_vpp_config())
+ for key in keys:
+ key.remove_vpp_config()
+ for key in keys:
+ self.assertFalse(key.query_vpp_config())
+
+ def test_add_bfd_sha1(self):
+ """ create a BFD session (SHA1) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key)
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s", session.state)
+ session.remove_vpp_config()
+
+ def test_double_add_sha1(self):
+ """ create the same BFD session twice (negative case) (SHA1) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key)
+ session.add_vpp_config()
+ with self.assertRaises(Exception):
+ session.add_vpp_config()
+
+ def test_add_auth_nonexistent_key(self):
+ """ create BFD session using non-existent SHA1 (negative case) """
+ session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=self.factory.create_random_key(self))
+ with self.assertRaises(Exception):
+ session.add_vpp_config()
+
+ def test_shared_sha1_key(self):
+ """ share single SHA1 key between multiple BFD sessions """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ sessions = [
+ VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key),
+ VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip6,
+ sha1_key=key, af=AF_INET6),
+ VppBFDUDPSession(self, self.pg1, self.pg1.remote_ip4,
+ sha1_key=key),
+ VppBFDUDPSession(self, self.pg1, self.pg1.remote_ip6,
+ sha1_key=key, af=AF_INET6)]
+ for s in sessions:
+ s.add_vpp_config()
+ removed = 0
+ for s in sessions:
+ e = key.get_bfd_auth_keys_dump_entry()
+ self.assert_equal(e.use_count, len(sessions) - removed,
+ "Use count for shared key")
+ s.remove_vpp_config()
+ removed += 1
+ e = key.get_bfd_auth_keys_dump_entry()
+ self.assert_equal(e.use_count, len(sessions) - removed,
+ "Use count for shared key")
+
+ def test_activate_auth(self):
+ """ activate SHA1 authentication """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ session.activate_auth(key)
+
+ def test_deactivate_auth(self):
+ """ deactivate SHA1 authentication """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ session.activate_auth(key)
+ session.deactivate_auth()
+
+ def test_change_key(self):
+ """ change SHA1 key """
+ key1 = self.factory.create_random_key(self)
+ key2 = self.factory.create_random_key(self)
+ while key2.conf_key_id == key1.conf_key_id:
+ key2 = self.factory.create_random_key(self)
+ key1.add_vpp_config()
+ key2.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key1)
+ session.add_vpp_config()
+ session.activate_auth(key2)
+
+ def test_set_del_udp_echo_source(self):
+ """ set/del udp echo source """
+ self.create_loopback_interfaces(1)
+ self.loopback0 = self.lo_interfaces[0]
+ self.loopback0.admin_up()
+ echo_source = self.vapi.bfd_udp_get_echo_source()
+ self.assertFalse(echo_source.is_set)
+ self.assertFalse(echo_source.have_usable_ip4)
+ self.assertFalse(echo_source.have_usable_ip6)
+
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ echo_source = self.vapi.bfd_udp_get_echo_source()
+ self.assertTrue(echo_source.is_set)
+ self.assertEqual(echo_source.sw_if_index, self.loopback0.sw_if_index)
+ self.assertFalse(echo_source.have_usable_ip4)
+ self.assertFalse(echo_source.have_usable_ip6)
+
+ self.loopback0.config_ip4()
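+        # the expected echo source address is the interface address with its
+        # least significant bit flipped - the test assumes vpp derives a
+        # usable echo source this way so it differs from the configured
+        # address (the same applies to the IPv6 check below)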
+ echo_ip4 = ipaddress.IPv4Address(int(ipaddress.IPv4Address(
+ self.loopback0.local_ip4)) ^ 1).packed
+ echo_source = self.vapi.bfd_udp_get_echo_source()
+ self.assertTrue(echo_source.is_set)
+ self.assertEqual(echo_source.sw_if_index, self.loopback0.sw_if_index)
+ self.assertTrue(echo_source.have_usable_ip4)
+ self.assertEqual(echo_source.ip4_addr.packed, echo_ip4)
+ self.assertFalse(echo_source.have_usable_ip6)
+
+ self.loopback0.config_ip6()
+ echo_ip6 = ipaddress.IPv6Address(int(ipaddress.IPv6Address(
+ self.loopback0.local_ip6)) ^ 1).packed
+
+ echo_source = self.vapi.bfd_udp_get_echo_source()
+ self.assertTrue(echo_source.is_set)
+ self.assertEqual(echo_source.sw_if_index, self.loopback0.sw_if_index)
+ self.assertTrue(echo_source.have_usable_ip4)
+ self.assertEqual(echo_source.ip4_addr.packed, echo_ip4)
+ self.assertTrue(echo_source.have_usable_ip6)
+ self.assertEqual(echo_source.ip6_addr.packed, echo_ip6)
+
+ self.vapi.bfd_udp_del_echo_source()
+ echo_source = self.vapi.bfd_udp_get_echo_source()
+ self.assertFalse(echo_source.is_set)
+ self.assertFalse(echo_source.have_usable_ip4)
+ self.assertFalse(echo_source.have_usable_ip6)
+
+
+class BFDTestSession(object):
+ """ BFD session as seen from test framework side """
+
+ def __init__(self, test, interface, af, detect_mult=3, sha1_key=None,
+ bfd_key_id=None, our_seq_number=None,
+ tunnel_header=None, phy_interface=None):
+ self.test = test
+ self.af = af
+ self.sha1_key = sha1_key
+ self.bfd_key_id = bfd_key_id
+ self.interface = interface
+ if phy_interface:
+ self.phy_interface = phy_interface
+ else:
+ self.phy_interface = self.interface
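+        # RFC 5881: the source port of BFD control packets must be in the
+        # range 49152 through 65535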
+ self.udp_sport = randint(49152, 65535)
+ if our_seq_number is None:
+ self.our_seq_number = randint(0, 40000000)
+ else:
+ self.our_seq_number = our_seq_number
+ self.vpp_seq_number = None
+ self.my_discriminator = 0
+ self.desired_min_tx = 300000
+ self.required_min_rx = 300000
+ self.required_min_echo_rx = None
+ self.detect_mult = detect_mult
+ self.diag = BFDDiagCode.no_diagnostic
+ self.your_discriminator = None
+ self.state = BFDState.down
+ self.auth_type = BFDAuthType.no_auth
+ self.tunnel_header = tunnel_header
+
+ def inc_seq_num(self):
+ """ increment sequence number, wrapping if needed """
+ if self.our_seq_number == 0xFFFFFFFF:
+ self.our_seq_number = 0
+ else:
+ self.our_seq_number += 1
+
+ def update(self, my_discriminator=None, your_discriminator=None,
+ desired_min_tx=None, required_min_rx=None,
+ required_min_echo_rx=None, detect_mult=None,
+ diag=None, state=None, auth_type=None):
+ """ update BFD parameters associated with session """
+ if my_discriminator is not None:
+ self.my_discriminator = my_discriminator
+ if your_discriminator is not None:
+ self.your_discriminator = your_discriminator
+ if required_min_rx is not None:
+ self.required_min_rx = required_min_rx
+ if required_min_echo_rx is not None:
+ self.required_min_echo_rx = required_min_echo_rx
+ if desired_min_tx is not None:
+ self.desired_min_tx = desired_min_tx
+ if detect_mult is not None:
+ self.detect_mult = detect_mult
+ if diag is not None:
+ self.diag = diag
+ if state is not None:
+ self.state = state
+ if auth_type is not None:
+ self.auth_type = auth_type
+
+ def fill_packet_fields(self, packet):
+ """ set packet fields with known values in packet """
+ bfd = packet[BFD]
+ if self.my_discriminator:
+ self.test.logger.debug("BFD: setting packet.my_discriminator=%s",
+ self.my_discriminator)
+ bfd.my_discriminator = self.my_discriminator
+ if self.your_discriminator:
+ self.test.logger.debug("BFD: setting packet.your_discriminator=%s",
+ self.your_discriminator)
+ bfd.your_discriminator = self.your_discriminator
+ if self.required_min_rx:
+ self.test.logger.debug(
+ "BFD: setting packet.required_min_rx_interval=%s",
+ self.required_min_rx)
+ bfd.required_min_rx_interval = self.required_min_rx
+ if self.required_min_echo_rx:
+ self.test.logger.debug(
+ "BFD: setting packet.required_min_echo_rx=%s",
+ self.required_min_echo_rx)
+ bfd.required_min_echo_rx_interval = self.required_min_echo_rx
+ if self.desired_min_tx:
+ self.test.logger.debug(
+ "BFD: setting packet.desired_min_tx_interval=%s",
+ self.desired_min_tx)
+ bfd.desired_min_tx_interval = self.desired_min_tx
+ if self.detect_mult:
+ self.test.logger.debug(
+ "BFD: setting packet.detect_mult=%s", self.detect_mult)
+ bfd.detect_mult = self.detect_mult
+ if self.diag:
+ self.test.logger.debug("BFD: setting packet.diag=%s", self.diag)
+ bfd.diag = self.diag
+ if self.state:
+ self.test.logger.debug("BFD: setting packet.state=%s", self.state)
+ bfd.state = self.state
+ if self.auth_type:
+ # this is used by a negative test-case
+ self.test.logger.debug("BFD: setting packet.auth_type=%s",
+ self.auth_type)
+ bfd.auth_type = self.auth_type
+
+ def create_packet(self):
+ """ create a BFD packet, reflecting the current state of session """
+ if self.sha1_key:
+ bfd = BFD(flags="A")
+ bfd.auth_type = self.sha1_key.auth_type
+ bfd.auth_len = BFD.sha1_auth_len
+ bfd.auth_key_id = self.bfd_key_id
+ bfd.auth_seq_num = self.our_seq_number
+ bfd.length = BFD.sha1_auth_len + BFD.bfd_pkt_len
+ else:
+ bfd = BFD()
+ packet = Ether(src=self.phy_interface.remote_mac,
+ dst=self.phy_interface.local_mac)
+ if self.tunnel_header:
+ packet = packet / self.tunnel_header
+ if self.af == AF_INET6:
+ packet = (packet /
+ IPv6(src=self.interface.remote_ip6,
+ dst=self.interface.local_ip6,
+ hlim=255) /
+ UDP(sport=self.udp_sport, dport=BFD.udp_dport) /
+ bfd)
+ else:
+ packet = (packet /
+ IP(src=self.interface.remote_ip4,
+ dst=self.interface.local_ip4,
+ ttl=255) /
+ UDP(sport=self.udp_sport, dport=BFD.udp_dport) /
+ bfd)
+ self.test.logger.debug("BFD: Creating packet")
+ self.fill_packet_fields(packet)
+ if self.sha1_key:
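+            # keyed SHA1 hash per RFC 5880: take the packet contents up to
+            # the Auth Key/Hash field (32 bytes here), append the shared key
+            # zero-padded to 20 bytes, then SHA1 the result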
+ hash_material = scapy.compat.raw(
+ packet[BFD])[:32] + self.sha1_key.key + \
+ b"\0" * (20 - len(self.sha1_key.key))
+ self.test.logger.debug("BFD: Calculated SHA1 hash: %s" %
+ hashlib.sha1(hash_material).hexdigest())
+ packet[BFD].auth_key_hash = hashlib.sha1(hash_material).digest()
+ return packet
+
+ def send_packet(self, packet=None, interface=None):
+ """ send packet on interface, creating the packet if needed """
+ if packet is None:
+ packet = self.create_packet()
+ if interface is None:
+ interface = self.phy_interface
+ self.test.logger.debug(ppp("Sending packet:", packet))
+ interface.add_stream(packet)
+ self.test.pg_start()
+
+ def verify_sha1_auth(self, packet):
+ """ Verify correctness of authentication in BFD layer. """
+ bfd = packet[BFD]
+ self.test.assert_equal(bfd.auth_len, 28, "Auth section length")
+ self.test.assert_equal(bfd.auth_type, self.sha1_key.auth_type,
+ BFDAuthType)
+ self.test.assert_equal(bfd.auth_key_id, self.bfd_key_id, "Key ID")
+ self.test.assert_equal(bfd.auth_reserved, 0, "Reserved")
+ if self.vpp_seq_number is None:
+ self.vpp_seq_number = bfd.auth_seq_num
+ self.test.logger.debug("Received initial sequence number: %s" %
+ self.vpp_seq_number)
+ else:
+ recvd_seq_num = bfd.auth_seq_num
+ self.test.logger.debug("Received followup sequence number: %s" %
+ recvd_seq_num)
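+            # RFC 5880: with meticulous keyed SHA1 the sequence number must
+            # be incremented with every packet, with plain keyed SHA1 it is
+            # only incremented occasionally, so it may repeat between packets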
+ if self.vpp_seq_number < 0xffffffff:
+ if self.sha1_key.auth_type == \
+ BFDAuthType.meticulous_keyed_sha1:
+ self.test.assert_equal(recvd_seq_num,
+ self.vpp_seq_number + 1,
+ "BFD sequence number")
+ else:
+ self.test.assert_in_range(recvd_seq_num,
+ self.vpp_seq_number,
+ self.vpp_seq_number + 1,
+ "BFD sequence number")
+ else:
+ if self.sha1_key.auth_type == \
+ BFDAuthType.meticulous_keyed_sha1:
+ self.test.assert_equal(recvd_seq_num, 0,
+ "BFD sequence number")
+ else:
+ self.test.assertIn(recvd_seq_num, (self.vpp_seq_number, 0),
+ "BFD sequence number not one of "
+ "(%s, 0)" % self.vpp_seq_number)
+ self.vpp_seq_number = recvd_seq_num
+ # last 20 bytes represent the hash - so replace them with the key,
+ # pad the result with zeros and hash the result
+ hash_material = bfd.original[:-20] + self.sha1_key.key + \
+ b"\0" * (20 - len(self.sha1_key.key))
+ expected_hash = hashlib.sha1(hash_material).hexdigest()
+ self.test.assert_equal(binascii.hexlify(bfd.auth_key_hash),
+ expected_hash.encode(), "Auth key hash")
+
+ def verify_bfd(self, packet):
+ """ Verify correctness of BFD layer. """
+ bfd = packet[BFD]
+ self.test.assert_equal(bfd.version, 1, "BFD version")
+ self.test.assert_equal(bfd.your_discriminator,
+ self.my_discriminator,
+ "BFD - your discriminator")
+ if self.sha1_key:
+ self.verify_sha1_auth(packet)
+
+
+def bfd_session_up(test):
+ """ Bring BFD session up """
+ test.logger.info("BFD: Waiting for slow hello")
+ p = wait_for_bfd_packet(test, 2, is_tunnel=test.vpp_session.is_tunnel)
+ old_offset = None
+ if hasattr(test, 'vpp_clock_offset'):
+ old_offset = test.vpp_clock_offset
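+    # vpp_clock_offset maps the local wall clock onto the pcap timestamps of
+    # captured packets, so later checks can pass time.time() based values as
+    # pcap_time_min to wait_for_bfd_packet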
+ test.vpp_clock_offset = time.time() - float(p.time)
+ test.logger.debug("BFD: Calculated vpp clock offset: %s",
+ test.vpp_clock_offset)
+ if old_offset:
+ test.assertAlmostEqual(
+ old_offset, test.vpp_clock_offset, delta=0.5,
+ msg="vpp clock offset not stable (new: %s, old: %s)" %
+ (test.vpp_clock_offset, old_offset))
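+    # standard BFD three-way handshake: answer vpp's Down packet with an
+    # Init packet carrying its discriminator, then confirm with an Up
+    # packet once vpp reports the session as up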
+ test.logger.info("BFD: Sending Init")
+ test.test_session.update(my_discriminator=randint(0, 40000000),
+ your_discriminator=p[BFD].my_discriminator,
+ state=BFDState.init)
+ if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
+ BFDAuthType.meticulous_keyed_sha1:
+ test.test_session.inc_seq_num()
+ test.test_session.send_packet()
+ test.logger.info("BFD: Waiting for event")
+ e = test.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(test, e, expected_state=BFDState.up)
+ test.logger.info("BFD: Session is Up")
+ test.test_session.update(state=BFDState.up)
+ if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
+ BFDAuthType.meticulous_keyed_sha1:
+ test.test_session.inc_seq_num()
+ test.test_session.send_packet()
+ test.assert_equal(test.vpp_session.state, BFDState.up, BFDState)
+
+
+def bfd_session_down(test):
+ """ Bring BFD session down """
+ test.assert_equal(test.vpp_session.state, BFDState.up, BFDState)
+ test.test_session.update(state=BFDState.down)
+ if test.test_session.sha1_key and test.test_session.sha1_key.auth_type == \
+ BFDAuthType.meticulous_keyed_sha1:
+ test.test_session.inc_seq_num()
+ test.test_session.send_packet()
+ test.logger.info("BFD: Waiting for event")
+ e = test.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(test, e, expected_state=BFDState.down)
+ test.logger.info("BFD: Session is Down")
+ test.assert_equal(test.vpp_session.state, BFDState.down, BFDState)
+
+
+def verify_bfd_session_config(test, session, state=None):
+ dump = session.get_bfd_udp_session_dump_entry()
+ test.assertIsNotNone(dump)
+ # since dump is not none, we have verified that sw_if_index and addresses
+ # are valid (in get_bfd_udp_session_dump_entry)
+ if state:
+ test.assert_equal(dump.state, state, "session state")
+ test.assert_equal(dump.required_min_rx, session.required_min_rx,
+ "required min rx interval")
+ test.assert_equal(dump.desired_min_tx, session.desired_min_tx,
+ "desired min tx interval")
+ test.assert_equal(dump.detect_mult, session.detect_mult,
+ "detect multiplier")
+ if session.sha1_key is None:
+ test.assert_equal(dump.is_authenticated, 0, "is_authenticated flag")
+ else:
+ test.assert_equal(dump.is_authenticated, 1, "is_authenticated flag")
+ test.assert_equal(dump.bfd_key_id, session.bfd_key_id,
+ "bfd key id")
+ test.assert_equal(dump.conf_key_id,
+ session.sha1_key.conf_key_id,
+ "config key id")
+
+
+def verify_ip(test, packet):
+ """ Verify correctness of IP layer. """
+ if test.vpp_session.af == AF_INET6:
+ ip = packet[IPv6]
+ local_ip = test.vpp_session.interface.local_ip6
+ remote_ip = test.vpp_session.interface.remote_ip6
+ test.assert_equal(ip.hlim, 255, "IPv6 hop limit")
+ else:
+ ip = packet[IP]
+ local_ip = test.vpp_session.interface.local_ip4
+ remote_ip = test.vpp_session.interface.remote_ip4
+ test.assert_equal(ip.ttl, 255, "IPv4 TTL")
+ test.assert_equal(ip.src, local_ip, "IP source address")
+ test.assert_equal(ip.dst, remote_ip, "IP destination address")
+
+
+def verify_udp(test, packet):
+ """ Verify correctness of UDP layer. """
+ udp = packet[UDP]
+ test.assert_equal(udp.dport, BFD.udp_dport, "UDP destination port")
+ test.assert_in_range(udp.sport, BFD.udp_sport_min, BFD.udp_sport_max,
+ "UDP source port")
+
+
+def verify_event(test, event, expected_state):
+ """ Verify correctness of event values. """
+ e = event
+ test.logger.debug("BFD: Event: %s" % reprlib.repr(e))
+ test.assert_equal(e.sw_if_index,
+ test.vpp_session.interface.sw_if_index,
+ "BFD interface index")
+
+    test.assert_equal(str(e.local_addr), test.vpp_session.local_addr,
+                      "Local IP address")
+    test.assert_equal(str(e.peer_addr), test.vpp_session.peer_addr,
+                      "Peer IP address")
+ test.assert_equal(e.state, expected_state, BFDState)
+
+
+def wait_for_bfd_packet(test, timeout=1, pcap_time_min=None, is_tunnel=False):
+ """ wait for BFD packet and verify its correctness
+
+ :param timeout: how long to wait
+    :param pcap_time_min: ignore packets with pcap timestamp lower than this
+    :param is_tunnel: if True, strip the outer IP layer before verification
+
+    :returns: the captured and verified BFD packet
+ """
+ test.logger.info("BFD: Waiting for BFD packet")
+ deadline = time.time() + timeout
+ counter = 0
+ while True:
+ counter += 1
+ # sanity check
+ test.assert_in_range(counter, 0, 100, "number of packets ignored")
+ time_left = deadline - time.time()
+ if time_left < 0:
+ raise CaptureTimeoutError("Packet did not arrive within timeout")
+ p = test.pg0.wait_for_packet(timeout=time_left)
+ test.logger.debug(ppp("BFD: Got packet:", p))
+ if pcap_time_min is not None and p.time < pcap_time_min:
+ test.logger.debug(ppp("BFD: ignoring packet (pcap time %s < "
+ "pcap time min %s):" %
+ (p.time, pcap_time_min), p))
+ else:
+ break
+ if is_tunnel:
+ # strip an IP layer and move to the next
+ p = p[IP].payload
+
+ bfd = p[BFD]
+ if bfd is None:
+ raise Exception(ppp("Unexpected or invalid BFD packet:", p))
+ if bfd.payload:
+ raise Exception(ppp("Unexpected payload in BFD packet:", bfd))
+ verify_ip(test, p)
+ verify_udp(test, p)
+ test.test_session.verify_bfd(p)
+ return p
+
+
+@tag_run_solo
+class BFD4TestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD)"""
+
+ pg0 = None
+ vpp_clock_offset = None
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFD4TestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces([0])
+ cls.create_loopback_interfaces(1)
+ cls.loopback0 = cls.lo_interfaces[0]
+ cls.loopback0.config_ip4()
+ cls.loopback0.admin_up()
+ cls.pg0.config_ip4()
+ cls.pg0.configure_ipv4_neighbors()
+ cls.pg0.admin_up()
+ cls.pg0.resolve_arp()
+
+ except Exception:
+ super(BFD4TestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFD4TestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFD4TestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+ try:
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(self, self.pg0, AF_INET)
+ except BaseException:
+ self.vapi.want_bfd_events(enable_disable=0)
+ raise
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=0)
+ self.vapi.collect_events() # clear the event queue
+ super(BFD4TestCase, self).tearDown()
+
+ def test_session_up(self):
+ """ bring BFD session up """
+ bfd_session_up(self)
+
+ def test_session_up_by_ip(self):
+ """ bring BFD session up - first frame looked up by address pair """
+ self.logger.info("BFD: Sending Slow control frame")
+ self.test_session.update(my_discriminator=randint(0, 40000000))
+ self.test_session.send_packet()
+ self.pg0.enable_capture()
+ p = self.pg0.wait_for_packet(1)
+ self.assert_equal(p[BFD].your_discriminator,
+ self.test_session.my_discriminator,
+ "BFD - your discriminator")
+ self.assert_equal(p[BFD].state, BFDState.init, BFDState)
+ self.test_session.update(your_discriminator=p[BFD].my_discriminator,
+ state=BFDState.up)
+ self.logger.info("BFD: Waiting for event")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.init)
+ self.logger.info("BFD: Sending Up")
+ self.test_session.send_packet()
+ self.logger.info("BFD: Waiting for event")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.up)
+ self.logger.info("BFD: Session is Up")
+ self.test_session.update(state=BFDState.up)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_session_down(self):
+ """ bring BFD session down """
+ bfd_session_up(self)
+ bfd_session_down(self)
+
+ def test_hold_up(self):
+ """ hold BFD session up """
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ wait_for_bfd_packet(self)
+ self.test_session.send_packet()
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_slow_timer(self):
+ """ verify slow periodic control frames while session down """
+ packet_count = 3
+ self.logger.info("BFD: Waiting for %d BFD packets", packet_count)
+ prev_packet = wait_for_bfd_packet(self, 2)
+ for dummy in range(packet_count):
+ next_packet = wait_for_bfd_packet(self, 2)
+ time_diff = next_packet.time - prev_packet.time
+ # spec says the range should be <0.75, 1>, allow extra 0.05 margin
+ # to work around timing issues
+ self.assert_in_range(
+ time_diff, 0.70, 1.05, "time between slow packets")
+ prev_packet = next_packet
+
+ def test_zero_remote_min_rx(self):
+ """ no packets when zero remote required min rx interval """
+ bfd_session_up(self)
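+        # RFC 5880: advertising a Required Min RX Interval of zero tells the
+        # peer to cease periodic transmission of control packets, so vpp
+        # must go quiet while the session stays up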
+ self.test_session.update(required_min_rx=0)
+ self.test_session.send_packet()
+ for dummy in range(self.test_session.detect_mult):
+ self.sleep(self.vpp_session.required_min_rx / USEC_IN_SEC,
+ "sleep before transmitting bfd packet")
+ self.test_session.send_packet()
+ try:
+ p = wait_for_bfd_packet(self, timeout=0)
+ self.logger.error(ppp("Received unexpected packet:", p))
+ except CaptureTimeoutError:
+ pass
+ self.assert_equal(
+ len(self.vapi.collect_events()), 0, "number of bfd events")
+ self.test_session.update(required_min_rx=300000)
+ for dummy in range(3):
+ self.test_session.send_packet()
+ wait_for_bfd_packet(
+ self, timeout=self.test_session.required_min_rx / USEC_IN_SEC)
+ self.assert_equal(
+ len(self.vapi.collect_events()), 0, "number of bfd events")
+
+ def test_conn_down(self):
+ """ verify session goes down after inactivity """
+ bfd_session_up(self)
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.sleep(detection_time, "waiting for BFD session time-out")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.down)
+
+ def test_peer_discr_reset_sess_down(self):
+ """ peer discriminator reset after session goes down """
+ bfd_session_up(self)
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.sleep(detection_time, "waiting for BFD session time-out")
+ self.test_session.my_discriminator = 0
+ wait_for_bfd_packet(self,
+ pcap_time_min=time.time() - self.vpp_clock_offset)
+
+ def test_large_required_min_rx(self):
+ """ large remote required min rx interval """
+ bfd_session_up(self)
+ p = wait_for_bfd_packet(self)
+ interval = 3000000
+ self.test_session.update(required_min_rx=interval)
+ self.test_session.send_packet()
+ time_mark = time.time()
+ count = 0
+        # busy wait here, trying to collect a packet or event; vpp is not
+        # allowed to send packets and the session will time out first, so the
+        # Up->Down event must arrive before any packets do
+ while time.time() < time_mark + interval / USEC_IN_SEC:
+ try:
+ p = wait_for_bfd_packet(self, timeout=0)
+                # if vpp managed to send a packet before we did the
+                # session update, then that's fine, ignore it
+ if p.time < time_mark - self.vpp_clock_offset:
+ continue
+ self.logger.error(ppp("Received unexpected packet:", p))
+ count += 1
+ except CaptureTimeoutError:
+ pass
+ events = self.vapi.collect_events()
+ if len(events) > 0:
+ verify_event(self, events[0], BFDState.down)
+ break
+ self.assert_equal(count, 0, "number of packets received")
+
+ def test_immediate_remote_min_rx_reduction(self):
+ """ immediately honor remote required min rx reduction """
+ self.vpp_session.remove_vpp_config()
+ self.vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, desired_min_tx=10000)
+ self.pg0.enable_capture()
+ self.vpp_session.add_vpp_config()
+ self.test_session.update(desired_min_tx=1000000,
+ required_min_rx=1000000)
+ bfd_session_up(self)
+ reference_packet = wait_for_bfd_packet(self)
+ time_mark = time.time()
+ interval = 300000
+ self.test_session.update(required_min_rx=interval)
+ self.test_session.send_packet()
+ extra_time = time.time() - time_mark
+ p = wait_for_bfd_packet(self)
+        # the first packet is allowed to be late by the time we spent doing
+        # the update, which is captured in extra_time
+ self.assert_in_range(p.time - reference_packet.time,
+ .95 * 0.75 * interval / USEC_IN_SEC,
+ 1.05 * interval / USEC_IN_SEC + extra_time,
+ "time between BFD packets")
+ reference_packet = p
+ for dummy in range(3):
+ p = wait_for_bfd_packet(self)
+ diff = p.time - reference_packet.time
+ self.assert_in_range(diff, .95 * .75 * interval / USEC_IN_SEC,
+ 1.05 * interval / USEC_IN_SEC,
+ "time between BFD packets")
+ reference_packet = p
+
+ def test_modify_req_min_rx_double(self):
+ """ modify session - double required min rx """
+ bfd_session_up(self)
+ p = wait_for_bfd_packet(self)
+ self.test_session.update(desired_min_tx=10000,
+ required_min_rx=10000)
+ self.test_session.send_packet()
+ # double required min rx
+ self.vpp_session.modify_parameters(
+ required_min_rx=2 * self.vpp_session.required_min_rx)
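+        # RFC 5880 requires timing changes on an Up session to be signalled
+        # via a poll sequence - vpp is expected to set the Poll bit and the
+        # test terminates the sequence below with a Final packet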
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ # poll bit needs to be set
+ self.assertIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+ # finish poll sequence with final packet
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ timeout = self.test_session.detect_mult * \
+ max(self.test_session.desired_min_tx,
+ self.vpp_session.required_min_rx) / USEC_IN_SEC
+ self.test_session.send_packet(final)
+ time_mark = time.time()
+ e = self.vapi.wait_for_event(2 * timeout, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.down)
+ time_to_event = time.time() - time_mark
+ self.assert_in_range(time_to_event, .9 * timeout,
+ 1.1 * timeout, "session timeout")
+
+ def test_modify_req_min_rx_halve(self):
+ """ modify session - halve required min rx """
+ self.vpp_session.modify_parameters(
+ required_min_rx=2 * self.vpp_session.required_min_rx)
+ bfd_session_up(self)
+ p = wait_for_bfd_packet(self)
+ self.test_session.update(desired_min_tx=10000,
+ required_min_rx=10000)
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ # halve required min rx
+ old_required_min_rx = self.vpp_session.required_min_rx
+ self.vpp_session.modify_parameters(
+ required_min_rx=self.vpp_session.required_min_rx // 2)
+        # now we wait 0.8 * detect_mult * old-req-min-rx and the session
+        # should still be up
+ self.sleep(0.8 * self.vpp_session.detect_mult *
+ old_required_min_rx / USEC_IN_SEC,
+ "wait before finishing poll sequence")
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ p = wait_for_bfd_packet(self)
+ # poll bit needs to be set
+ self.assertIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+ # finish poll sequence with final packet
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ # now the session should time out under new conditions
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ before = time.time()
+ e = self.vapi.wait_for_event(
+ 2 * detection_time, "bfd_udp_session_event")
+ after = time.time()
+ self.assert_in_range(after - before,
+ 0.9 * detection_time,
+ 1.1 * detection_time,
+ "time before bfd session goes down")
+ verify_event(self, e, expected_state=BFDState.down)
+
+ def test_modify_detect_mult(self):
+ """ modify detect multiplier """
+ bfd_session_up(self)
+ p = wait_for_bfd_packet(self)
+ self.vpp_session.modify_parameters(detect_mult=1)
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(self.vpp_session.detect_mult,
+ p[BFD].detect_mult,
+ "detect mult")
+ # poll bit must not be set
+ self.assertNotIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+ self.vpp_session.modify_parameters(detect_mult=10)
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(self.vpp_session.detect_mult,
+ p[BFD].detect_mult,
+ "detect mult")
+ # poll bit must not be set
+ self.assertNotIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+
+ def test_queued_poll(self):
+ """ test poll sequence queueing """
+ bfd_session_up(self)
+ p = wait_for_bfd_packet(self)
+ self.vpp_session.modify_parameters(
+ required_min_rx=2 * self.vpp_session.required_min_rx)
+ p = wait_for_bfd_packet(self)
+ poll_sequence_start = time.time()
+ poll_sequence_length_min = 0.5
+ send_final_after = time.time() + poll_sequence_length_min
+ # poll bit needs to be set
+ self.assertIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+ self.assert_equal(p[BFD].required_min_rx_interval,
+ self.vpp_session.required_min_rx,
+ "BFD required min rx interval")
+ self.vpp_session.modify_parameters(
+ required_min_rx=2 * self.vpp_session.required_min_rx)
+ # 2nd poll sequence should be queued now
+ # don't send the reply back yet, wait for some time to emulate
+ # longer round-trip time
+ packet_count = 0
+ while time.time() < send_final_after:
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ self.assert_equal(p[BFD].required_min_rx_interval,
+ self.vpp_session.required_min_rx,
+ "BFD required min rx interval")
+ packet_count += 1
+ # poll bit must be set
+ self.assertIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit not set in BFD packet")
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ # finish 1st with final
+ poll_sequence_length = time.time() - poll_sequence_start
+ # vpp must wait for some time before starting new poll sequence
+ poll_no_2_started = False
+ for dummy in range(2 * packet_count):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ if "P" in p.sprintf("%BFD.flags%"):
+ poll_no_2_started = True
+ if time.time() < poll_sequence_start + poll_sequence_length:
+ raise Exception("VPP started 2nd poll sequence too soon")
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ break
+ else:
+ self.test_session.send_packet()
+ self.assertTrue(poll_no_2_started, "2nd poll sequence not performed")
+ # finish 2nd with final
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ p = wait_for_bfd_packet(self)
+ # poll bit must not be set
+ self.assertNotIn("P", p.sprintf("%BFD.flags%"),
+ "Poll bit set in BFD packet")
+
+    # returns inconsistent results requiring retries in per-patch tests
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_poll_response(self):
+ """ test correct response to control frame with poll bit set """
+ bfd_session_up(self)
+ poll = self.test_session.create_packet()
+ poll[BFD].flags = "P"
+ self.test_session.send_packet(poll)
+ final = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assertIn("F", final.sprintf("%BFD.flags%"))
+
+ def test_no_periodic_if_remote_demand(self):
+ """ no periodic frames outside poll sequence if remote demand set """
+ bfd_session_up(self)
+ demand = self.test_session.create_packet()
+ demand[BFD].flags = "D"
+ self.test_session.send_packet(demand)
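+        # RFC 5880 Demand mode: once the session is up and the remote system
+        # sets the Demand bit, vpp must stop sending periodic control
+        # packets except as part of a poll sequence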
+ transmit_time = 0.9 \
+ * max(self.vpp_session.required_min_rx,
+ self.test_session.desired_min_tx) \
+ / USEC_IN_SEC
+ count = 0
+ for dummy in range(self.test_session.detect_mult * 2):
+ self.sleep(transmit_time)
+ self.test_session.send_packet(demand)
+ try:
+ p = wait_for_bfd_packet(self, timeout=0)
+ self.logger.error(ppp("Received unexpected packet:", p))
+ count += 1
+ except CaptureTimeoutError:
+ pass
+ events = self.vapi.collect_events()
+ for e in events:
+ self.logger.error("Received unexpected event: %s", e)
+ self.assert_equal(count, 0, "number of packets received")
+ self.assert_equal(len(events), 0, "number of events received")
+
+ def test_echo_looped_back(self):
+ """ echo packets looped back """
+        # no session is needed in this case
+ self.vpp_session.remove_vpp_config()
+ self.pg0.enable_capture()
+ echo_packet_count = 10
+        # random source port, low enough that it can be incremented a few times
+ udp_sport_tx = randint(1, 50000)
+ udp_sport_rx = udp_sport_tx
+ echo_packet = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4,
+ dst=self.pg0.remote_ip4) /
+ UDP(dport=BFD.udp_dport_echo) /
+ Raw("this should be looped back"))
+ for dummy in range(echo_packet_count):
+ self.sleep(.01, "delay between echo packets")
+ echo_packet[UDP].sport = udp_sport_tx
+ udp_sport_tx += 1
+ self.logger.debug(ppp("Sending packet:", echo_packet))
+ self.pg0.add_stream(echo_packet)
+ self.pg_start()
+ for dummy in range(echo_packet_count):
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ ether = p[Ether]
+ self.assert_equal(self.pg0.remote_mac,
+ ether.dst, "Destination MAC")
+ self.assert_equal(self.pg0.local_mac, ether.src, "Source MAC")
+ ip = p[IP]
+ self.assert_equal(self.pg0.remote_ip4, ip.dst, "Destination IP")
+            self.assert_equal(self.pg0.remote_ip4, ip.src, "Source IP")
+ udp = p[UDP]
+ self.assert_equal(udp.dport, BFD.udp_dport_echo,
+ "UDP destination port")
+ self.assert_equal(udp.sport, udp_sport_rx, "UDP source port")
+ udp_sport_rx += 1
+            # need to compare the raw payload here, otherwise BFD_vpp_echo
+            # gets in the way
+ self.assertEqual(scapy.compat.raw(p[UDP].payload),
+ scapy.compat.raw(echo_packet[UDP].payload),
+ "Received packet is not the echo packet sent")
+ self.assert_equal(udp_sport_tx, udp_sport_rx, "UDP source port (== "
+ "ECHO packet identifier for test purposes)")
+
+ def test_echo(self):
+ """ echo function """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.test_session.send_packet()
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ # echo shouldn't work without echo source set
+ for dummy in range(10):
+ sleep = self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.sleep(sleep, "delay before sending bfd packet")
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(p[BFD].required_min_rx_interval,
+ self.vpp_session.required_min_rx,
+ "BFD required min rx interval")
+ self.test_session.send_packet()
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ echo_seen = False
+ # should be turned on - loopback echo packets
+ for dummy in range(3):
+ loop_until = time.time() + 0.75 * detection_time
+ while time.time() < loop_until:
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ if p[UDP].dport == BFD.udp_dport_echo:
+ self.assert_equal(
+ p[IP].dst, self.pg0.local_ip4, "BFD ECHO dst IP")
+ self.assertNotEqual(p[IP].src, self.loopback0.local_ip4,
+ "BFD ECHO src IP equal to loopback IP")
+ self.logger.debug(ppp("Looping back packet:", p))
+ self.assert_equal(p[Ether].dst, self.pg0.remote_mac,
+ "ECHO packet destination MAC address")
+ p[Ether].dst = self.pg0.local_mac
+ self.pg0.add_stream(p)
+ self.pg_start()
+ echo_seen = True
+ elif p.haslayer(BFD):
+ if echo_seen:
+ self.assertGreaterEqual(
+ p[BFD].required_min_rx_interval,
+ 1000000)
+ if "P" in p.sprintf("%BFD.flags%"):
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ self.test_session.send_packet()
+ self.assertTrue(echo_seen, "No echo packets received")
+
+ def test_echo_fail(self):
+ """ session goes down if echo function fails """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.test_session.send_packet()
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ # echo function should be used now, but we will drop the echo packets
+ verified_diag = False
+ for dummy in range(3):
+ loop_until = time.time() + 0.75 * detection_time
+ while time.time() < loop_until:
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ if p[UDP].dport == BFD.udp_dport_echo:
+ # dropped
+ pass
+ elif p.haslayer(BFD):
+ if "P" in p.sprintf("%BFD.flags%"):
+ self.assertGreaterEqual(
+ p[BFD].required_min_rx_interval,
+ 1000000)
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ if p[BFD].state == BFDState.down:
+ self.assert_equal(p[BFD].diag,
+ BFDDiagCode.echo_function_failed,
+ BFDDiagCode)
+ verified_diag = True
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+ self.test_session.send_packet()
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 1, "number of bfd events")
+ self.assert_equal(events[0].state, BFDState.down, BFDState)
+ self.assertTrue(verified_diag, "Incorrect diagnostics code received")
+
+ def test_echo_stop(self):
+ """ echo function stops if peer sets required min echo rx zero """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.test_session.send_packet()
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ # wait for first echo packet
+ while True:
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ if p[UDP].dport == BFD.udp_dport_echo:
+ self.logger.debug(ppp("Looping back packet:", p))
+ p[Ether].dst = self.pg0.local_mac
+ self.pg0.add_stream(p)
+ self.pg_start()
+ break
+ elif p.haslayer(BFD):
+ # ignore BFD
+ pass
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+ self.test_session.update(required_min_echo_rx=0)
+ self.test_session.send_packet()
+ # echo packets shouldn't arrive anymore
+ for dummy in range(5):
+ wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.test_session.send_packet()
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 0, "number of bfd events")
+
+ def test_echo_source_removed(self):
+ """ echo function stops if echo source is removed """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.test_session.send_packet()
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ # wait for first echo packet
+ while True:
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ if p[UDP].dport == BFD.udp_dport_echo:
+ self.logger.debug(ppp("Looping back packet:", p))
+ p[Ether].dst = self.pg0.local_mac
+ self.pg0.add_stream(p)
+ self.pg_start()
+ break
+ elif p.haslayer(BFD):
+ # ignore BFD
+ pass
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+ self.vapi.bfd_udp_del_echo_source()
+ self.test_session.send_packet()
+ # echo packets shouldn't arrive anymore
+ for dummy in range(5):
+ wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.test_session.send_packet()
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 0, "number of bfd events")
+
+ def test_stale_echo(self):
+ """ stale echo packets don't keep a session up """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ self.test_session.send_packet()
+ # should be turned on - loopback echo packets
+ echo_packet = None
+ timeout_at = None
+ timeout_ok = False
+ for dummy in range(10 * self.vpp_session.detect_mult):
+ p = self.pg0.wait_for_packet(1)
+ if p[UDP].dport == BFD.udp_dport_echo:
+ if echo_packet is None:
+ self.logger.debug(ppp("Got first echo packet:", p))
+ echo_packet = p
+ timeout_at = time.time() + self.vpp_session.detect_mult * \
+ self.test_session.required_min_echo_rx / USEC_IN_SEC
+ else:
+ self.logger.debug(ppp("Got followup echo packet:", p))
+ self.logger.debug(ppp("Looping back first echo packet:", p))
+ echo_packet[Ether].dst = self.pg0.local_mac
+ self.pg0.add_stream(echo_packet)
+ self.pg_start()
+ elif p.haslayer(BFD):
+ self.logger.debug(ppp("Got packet:", p))
+ if "P" in p.sprintf("%BFD.flags%"):
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ if p[BFD].state == BFDState.down:
+ self.assertIsNotNone(
+ timeout_at,
+ "Session went down before first echo packet received")
+ now = time.time()
+ self.assertGreaterEqual(
+ now, timeout_at,
+ "Session timeout at %s, but is expected at %s" %
+ (now, timeout_at))
+ self.assert_equal(p[BFD].diag,
+ BFDDiagCode.echo_function_failed,
+ BFDDiagCode)
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 1, "number of bfd events")
+ self.assert_equal(events[0].state, BFDState.down, BFDState)
+ timeout_ok = True
+ break
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+ self.test_session.send_packet()
+ self.assertTrue(timeout_ok, "Expected timeout event didn't occur")
+
+ def test_invalid_echo_checksum(self):
+ """ echo packets with invalid checksum don't keep a session up """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ self.test_session.send_packet()
+ # should be turned on - loopback echo packets
+ timeout_at = None
+ timeout_ok = False
+ for dummy in range(10 * self.vpp_session.detect_mult):
+ p = self.pg0.wait_for_packet(1)
+ if p[UDP].dport == BFD.udp_dport_echo:
+ self.logger.debug(ppp("Got echo packet:", p))
+ if timeout_at is None:
+ timeout_at = time.time() + self.vpp_session.detect_mult * \
+ self.test_session.required_min_echo_rx / USEC_IN_SEC
+ p[BFD_vpp_echo].checksum = getrandbits(64)
+ p[Ether].dst = self.pg0.local_mac
+ self.logger.debug(ppp("Looping back modified echo packet:", p))
+ self.pg0.add_stream(p)
+ self.pg_start()
+ elif p.haslayer(BFD):
+ self.logger.debug(ppp("Got packet:", p))
+ if "P" in p.sprintf("%BFD.flags%"):
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ if p[BFD].state == BFDState.down:
+ self.assertIsNotNone(
+ timeout_at,
+ "Session went down before first echo packet received")
+ now = time.time()
+ self.assertGreaterEqual(
+ now, timeout_at,
+ "Session timeout at %s, but is expected at %s" %
+ (now, timeout_at))
+ self.assert_equal(p[BFD].diag,
+ BFDDiagCode.echo_function_failed,
+ BFDDiagCode)
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 1, "number of bfd events")
+ self.assert_equal(events[0].state, BFDState.down, BFDState)
+ timeout_ok = True
+ break
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+ self.test_session.send_packet()
+ self.assertTrue(timeout_ok, "Expected timeout event didn't occur")
+
+ def test_admin_up_down(self):
+ """ put session admin-up and admin-down """
+ bfd_session_up(self)
+ self.vpp_session.admin_down()
+ self.pg0.enable_capture()
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.admin_down)
+ for dummy in range(2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.admin_down, BFDState)
+ # try to bring session up - shouldn't be possible
+ self.test_session.update(state=BFDState.init)
+ self.test_session.send_packet()
+ for dummy in range(2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.admin_down, BFDState)
+ self.vpp_session.admin_up()
+ self.test_session.update(state=BFDState.down)
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.down)
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(p[BFD].state, BFDState.down, BFDState)
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(p[BFD].state, BFDState.init, BFDState)
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.init)
+ self.test_session.update(state=BFDState.up)
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.up)
+
+ def test_config_change_remote_demand(self):
+ """ configuration change while peer in demand mode """
+ bfd_session_up(self)
+ demand = self.test_session.create_packet()
+ demand[BFD].flags = "D"
+ self.test_session.send_packet(demand)
+ self.vpp_session.modify_parameters(
+ required_min_rx=2 * self.vpp_session.required_min_rx)
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ # poll bit must be set
+ self.assertIn("P", p.sprintf("%BFD.flags%"), "Poll bit not set")
+ # terminate poll sequence
+ final = self.test_session.create_packet()
+ final[BFD].flags = "D+F"
+ self.test_session.send_packet(final)
+ # vpp should be quiet now again
+ transmit_time = 0.9 \
+ * max(self.vpp_session.required_min_rx,
+ self.test_session.desired_min_tx) \
+ / USEC_IN_SEC
+ count = 0
+ for dummy in range(self.test_session.detect_mult * 2):
+ self.sleep(transmit_time)
+ self.test_session.send_packet(demand)
+ try:
+ p = wait_for_bfd_packet(self, timeout=0)
+ self.logger.error(ppp("Received unexpected packet:", p))
+ count += 1
+ except CaptureTimeoutError:
+ pass
+ events = self.vapi.collect_events()
+ for e in events:
+ self.logger.error("Received unexpected event: %s", e)
+ self.assert_equal(count, 0, "number of packets received")
+ self.assert_equal(len(events), 0, "number of events received")
+
+ def test_intf_deleted(self):
+ """ interface with bfd session deleted """
+ intf = VppLoInterface(self)
+ intf.config_ip4()
+ intf.admin_up()
+ sw_if_index = intf.sw_if_index
+ vpp_session = VppBFDUDPSession(self, intf, intf.remote_ip4)
+ vpp_session.add_vpp_config()
+ vpp_session.admin_up()
+ intf.remove_vpp_config()
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ self.assert_equal(e.sw_if_index, sw_if_index, "sw_if_index")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+
+@tag_run_solo
+@tag_fixme_vpp_workers
+class BFD6TestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) (IPv6) """
+
+ pg0 = None
+ vpp_clock_offset = None
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFD6TestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces([0])
+ cls.pg0.config_ip6()
+ cls.pg0.configure_ipv6_neighbors()
+ cls.pg0.admin_up()
+ cls.pg0.resolve_ndp()
+ cls.create_loopback_interfaces(1)
+ cls.loopback0 = cls.lo_interfaces[0]
+ cls.loopback0.config_ip6()
+ cls.loopback0.admin_up()
+
+ except Exception:
+ super(BFD6TestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFD6TestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFD6TestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+ try:
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip6,
+ af=AF_INET6)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(self, self.pg0, AF_INET6)
+ self.logger.debug(self.vapi.cli("show adj nbr"))
+ except BaseException:
+ self.vapi.want_bfd_events(enable_disable=0)
+ raise
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=0)
+ self.vapi.collect_events() # clear the event queue
+ super(BFD6TestCase, self).tearDown()
+
+ def test_session_up(self):
+ """ bring BFD session up """
+ bfd_session_up(self)
+
+ def test_session_up_by_ip(self):
+ """ bring BFD session up - first frame looked up by address pair """
+ self.logger.info("BFD: Sending Slow control frame")
+ self.test_session.update(my_discriminator=randint(0, 40000000))
+ self.test_session.send_packet()
+ self.pg0.enable_capture()
+ p = self.pg0.wait_for_packet(1)
+ self.assert_equal(p[BFD].your_discriminator,
+ self.test_session.my_discriminator,
+ "BFD - your discriminator")
+ self.assert_equal(p[BFD].state, BFDState.init, BFDState)
+ self.test_session.update(your_discriminator=p[BFD].my_discriminator,
+ state=BFDState.up)
+ self.logger.info("BFD: Waiting for event")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.init)
+ self.logger.info("BFD: Sending Up")
+ self.test_session.send_packet()
+ self.logger.info("BFD: Waiting for event")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ verify_event(self, e, expected_state=BFDState.up)
+ self.logger.info("BFD: Session is Up")
+ self.test_session.update(state=BFDState.up)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_hold_up(self):
+ """ hold BFD session up """
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ wait_for_bfd_packet(self)
+ self.test_session.send_packet()
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_echo_looped_back(self):
+ """ echo packets looped back """
+        # no session is needed in this case
+ self.vpp_session.remove_vpp_config()
+ self.pg0.enable_capture()
+ echo_packet_count = 10
+        # random source port, low enough that it can be incremented a few times
+ udp_sport_tx = randint(1, 50000)
+ udp_sport_rx = udp_sport_tx
+ echo_packet = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6,
+ dst=self.pg0.remote_ip6) /
+ UDP(dport=BFD.udp_dport_echo) /
+ Raw("this should be looped back"))
+ for dummy in range(echo_packet_count):
+ self.sleep(.01, "delay between echo packets")
+ echo_packet[UDP].sport = udp_sport_tx
+ udp_sport_tx += 1
+ self.logger.debug(ppp("Sending packet:", echo_packet))
+ self.pg0.add_stream(echo_packet)
+ self.pg_start()
+ for dummy in range(echo_packet_count):
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ ether = p[Ether]
+ self.assert_equal(self.pg0.remote_mac,
+ ether.dst, "Destination MAC")
+ self.assert_equal(self.pg0.local_mac, ether.src, "Source MAC")
+ ip = p[IPv6]
+ self.assert_equal(self.pg0.remote_ip6, ip.dst, "Destination IP")
+            self.assert_equal(self.pg0.remote_ip6, ip.src, "Source IP")
+ udp = p[UDP]
+ self.assert_equal(udp.dport, BFD.udp_dport_echo,
+ "UDP destination port")
+ self.assert_equal(udp.sport, udp_sport_rx, "UDP source port")
+ udp_sport_rx += 1
+            # need to compare the raw payload here, otherwise BFD_vpp_echo
+            # gets in the way
+ self.assertEqual(scapy.compat.raw(p[UDP].payload),
+ scapy.compat.raw(echo_packet[UDP].payload),
+ "Received packet is not the echo packet sent")
+ self.assert_equal(udp_sport_tx, udp_sport_rx, "UDP source port (== "
+ "ECHO packet identifier for test purposes)")
+
+ def test_echo(self):
+ """ echo function """
+ bfd_session_up(self)
+ self.test_session.update(required_min_echo_rx=150000)
+ self.test_session.send_packet()
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ # echo shouldn't work without echo source set
+ for dummy in range(10):
+ sleep = self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.sleep(sleep, "delay before sending bfd packet")
+ self.test_session.send_packet()
+ p = wait_for_bfd_packet(
+ self, pcap_time_min=time.time() - self.vpp_clock_offset)
+ self.assert_equal(p[BFD].required_min_rx_interval,
+ self.vpp_session.required_min_rx,
+ "BFD required min rx interval")
+ self.test_session.send_packet()
+ self.vapi.bfd_udp_set_echo_source(
+ sw_if_index=self.loopback0.sw_if_index)
+ echo_seen = False
+ # should be turned on - loopback echo packets
+ for dummy in range(3):
+ loop_until = time.time() + 0.75 * detection_time
+ while time.time() < loop_until:
+ p = self.pg0.wait_for_packet(1)
+ self.logger.debug(ppp("Got packet:", p))
+ if p[UDP].dport == BFD.udp_dport_echo:
+ self.assert_equal(
+ p[IPv6].dst, self.pg0.local_ip6, "BFD ECHO dst IP")
+ self.assertNotEqual(p[IPv6].src, self.loopback0.local_ip6,
+ "BFD ECHO src IP equal to loopback IP")
+ self.logger.debug(ppp("Looping back packet:", p))
+ self.assert_equal(p[Ether].dst, self.pg0.remote_mac,
+ "ECHO packet destination MAC address")
+ p[Ether].dst = self.pg0.local_mac
+ self.pg0.add_stream(p)
+ self.pg_start()
+ echo_seen = True
+ elif p.haslayer(BFD):
+ if echo_seen:
+ self.assertGreaterEqual(
+ p[BFD].required_min_rx_interval,
+ 1000000)
+ if "P" in p.sprintf("%BFD.flags%"):
+ final = self.test_session.create_packet()
+ final[BFD].flags = "F"
+ self.test_session.send_packet(final)
+ else:
+ raise Exception(ppp("Received unknown packet:", p))
+
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ self.test_session.send_packet()
+ self.assertTrue(echo_seen, "No echo packets received")
+
+ def test_intf_deleted(self):
+ """ interface with bfd session deleted """
+ intf = VppLoInterface(self)
+ intf.config_ip6()
+ intf.admin_up()
+ sw_if_index = intf.sw_if_index
+ vpp_session = VppBFDUDPSession(
+ self, intf, intf.remote_ip6, af=AF_INET6)
+ vpp_session.add_vpp_config()
+ vpp_session.admin_up()
+ intf.remove_vpp_config()
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_event")
+ self.assert_equal(e.sw_if_index, sw_if_index, "sw_if_index")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+
+@tag_run_solo
+class BFDFIBTestCase(VppTestCase):
+ """ BFD-FIB interactions (IPv6) """
+
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDFIBTestCase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDFIBTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDFIBTestCase, self).setUp()
+ self.create_pg_interfaces(range(1))
+
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.configure_ipv6_neighbors()
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=False)
+
+ super(BFDFIBTestCase, self).tearDown()
+
+ @staticmethod
+ def pkt_is_not_data_traffic(p):
+ """ not data traffic implies BFD or the usual IPv6 ND/RA"""
+ if p.haslayer(BFD) or is_ipv6_misc(p):
+ return True
+ return False
+
+ def test_session_with_fib(self):
+ """ BFD-FIB interactions """
+
+ # packets to match against both of the routes
+ p = [(Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src="3001::1", dst="2001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src="3001::1", dst="2002::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # A recursive and a non-recursive route via a next-hop that
+ # will have a BFD session
+ ip_2001_s_64 = VppIpRoute(self, "2001::", 64,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index)])
+ ip_2002_s_64 = VppIpRoute(self, "2002::", 64,
+ [VppRoutePath(self.pg0.remote_ip6,
+ 0xffffffff)])
+ ip_2001_s_64.add_vpp_config()
+ ip_2002_s_64.add_vpp_config()
+
+ # bring the session up now the routes are present
+ self.vpp_session = VppBFDUDPSession(self,
+ self.pg0,
+ self.pg0.remote_ip6,
+ af=AF_INET6)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(self, self.pg0, AF_INET6)
+
+ # session is up - traffic passes
+ bfd_session_up(self)
+
+ self.pg0.add_stream(p)
+ self.pg_start()
+ for packet in p:
+ captured = self.pg0.wait_for_packet(
+ 1,
+ filter_out_fn=self.pkt_is_not_data_traffic)
+ self.assertEqual(captured[IPv6].dst,
+ packet[IPv6].dst)
+
+        # session is down - traffic is dropped
+ bfd_session_down(self)
+
+ self.pg0.add_stream(p)
+ self.pg_start()
+ with self.assertRaises(CaptureTimeoutError):
+ self.pg0.wait_for_packet(1, self.pkt_is_not_data_traffic)
+
+ # session is up - traffic passes
+ bfd_session_up(self)
+
+ self.pg0.add_stream(p)
+ self.pg_start()
+ for packet in p:
+ captured = self.pg0.wait_for_packet(
+ 1,
+ filter_out_fn=self.pkt_is_not_data_traffic)
+ self.assertEqual(captured[IPv6].dst,
+ packet[IPv6].dst)
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class BFDTunTestCase(VppTestCase):
+ """ BFD over GRE tunnel """
+
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDTunTestCase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDTunTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDTunTestCase, self).setUp()
+ self.create_pg_interfaces(range(1))
+
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=0)
+
+ super(BFDTunTestCase, self).tearDown()
+
+ @staticmethod
+ def pkt_is_not_data_traffic(p):
+ """ not data traffic implies BFD or the usual IPv6 ND/RA"""
+ if p.haslayer(BFD) or is_ipv6_misc(p):
+ return True
+ return False
+
+ def test_bfd_o_gre(self):
+ """ BFD-o-GRE """
+
+ # A GRE interface over which to run a BFD session
+ gre_if = VppGreInterface(self,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ # bring the session up now the routes are present
+ self.vpp_session = VppBFDUDPSession(self,
+ gre_if,
+ gre_if.remote_ip4,
+ is_tunnel=True)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+
+ self.test_session = BFDTestSession(
+ self, gre_if, AF_INET,
+ tunnel_header=(IP(src=self.pg0.remote_ip4,
+ dst=self.pg0.local_ip4) /
+ GRE()),
+ phy_interface=self.pg0)
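+        # the test session prepends the IP/GRE tunnel header above to its BFD
+        # packets, so they ingress on pg0 but terminate on the GRE interface
+        # that carries the VPP session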
+
+ # packets to match against both of the routes
+ p = [(Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=gre_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # session is up - traffic passes
+ bfd_session_up(self)
+
+ self.send_and_expect(self.pg0, p, self.pg0)
+
+ # bring session down
+ bfd_session_down(self)
+
+
+@tag_run_solo
+class BFDSHA1TestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) (SHA1 auth) """
+
+ pg0 = None
+ vpp_clock_offset = None
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDSHA1TestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces([0])
+ cls.pg0.config_ip4()
+ cls.pg0.admin_up()
+ cls.pg0.resolve_arp()
+
+ except Exception:
+ super(BFDSHA1TestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDSHA1TestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDSHA1TestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=False)
+ self.vapi.collect_events() # clear the event queue
+ super(BFDSHA1TestCase, self).tearDown()
+
+ def test_session_up(self):
+ """ bring BFD session up """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4,
+ sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+
+ def test_hold_up(self):
+ """ hold BFD session up """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4,
+ sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ wait_for_bfd_packet(self)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_hold_up_meticulous(self):
+ """ hold BFD session up - meticulous auth """
+ key = self.factory.create_random_key(
+ self, BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ # specify sequence number so that it wraps
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id,
+ our_seq_number=0xFFFFFFFF - 4)
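+        # starting 5 below the 32-bit maximum means the sequence number wraps
+        # past 0xFFFFFFFF within the 30 packets exchanged below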
+ bfd_session_up(self)
+ for dummy in range(30):
+ wait_for_bfd_packet(self)
+ self.test_session.inc_seq_num()
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_send_bad_seq_number(self):
+ """ session is not kept alive by msgs with bad sequence numbers"""
+ key = self.factory.create_random_key(
+ self, BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
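+        # e.g. detect_mult 3 with required_min_rx of 100000 usec works out to
+        # a 0.3 s detection time (illustrative values only)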
+ send_until = time.time() + 2 * detection_time
+ while time.time() < send_until:
+ self.test_session.send_packet()
+ self.sleep(0.7 * self.vpp_session.required_min_rx / USEC_IN_SEC,
+ "time between bfd packets")
+ e = self.vapi.collect_events()
+ # session should be down now, because the sequence numbers weren't
+ # updated
+ self.assert_equal(len(e), 1, "number of bfd events")
+ verify_event(self, e[0], expected_state=BFDState.down)
+
+ def execute_rogue_session_scenario(self, vpp_bfd_udp_session,
+ legitimate_test_session,
+ rogue_test_session,
+ rogue_bfd_values=None):
+ """ execute a rogue session interaction scenario
+
+ 1. create vpp session, add config
+ 2. bring the legitimate session up
+ 3. copy the bfd values from legitimate session to rogue session
+ 4. apply rogue_bfd_values to rogue session
+ 5. set rogue session state to down
+        6. send a message from the rogue session to take the session down
+ 7. assert that the legitimate session is unaffected
+ """
+
+ self.vpp_session = vpp_bfd_udp_session
+ self.vpp_session.add_vpp_config()
+ self.test_session = legitimate_test_session
+ # bring vpp session up
+ bfd_session_up(self)
+ # send packet from rogue session
+ rogue_test_session.update(
+ my_discriminator=self.test_session.my_discriminator,
+ your_discriminator=self.test_session.your_discriminator,
+ desired_min_tx=self.test_session.desired_min_tx,
+ required_min_rx=self.test_session.required_min_rx,
+ detect_mult=self.test_session.detect_mult,
+ diag=self.test_session.diag,
+ state=self.test_session.state,
+ auth_type=self.test_session.auth_type)
+ if rogue_bfd_values:
+ rogue_test_session.update(**rogue_bfd_values)
+ rogue_test_session.update(state=BFDState.down)
+ rogue_test_session.send_packet()
+ wait_for_bfd_packet(self)
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+
+ def test_mismatch_auth(self):
+ """ session is not brought down by unauthenticated msg """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=key)
+ legitimate_test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id)
+ rogue_test_session = BFDTestSession(self, self.pg0, AF_INET)
+ self.execute_rogue_session_scenario(vpp_session,
+ legitimate_test_session,
+ rogue_test_session)
+
+ def test_mismatch_bfd_key_id(self):
+ """ session is not brought down by msg with non-existent key-id """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=key)
+ # pick a different random bfd key id
+ x = randint(0, 255)
+ while x == vpp_session.bfd_key_id:
+ x = randint(0, 255)
+ legitimate_test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id)
+ rogue_test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key, bfd_key_id=x)
+ self.execute_rogue_session_scenario(vpp_session,
+ legitimate_test_session,
+ rogue_test_session)
+
+ def test_mismatched_auth_type(self):
+ """ session is not brought down by msg with wrong auth type """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=key)
+ legitimate_test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id)
+ rogue_test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id)
+ self.execute_rogue_session_scenario(
+ vpp_session, legitimate_test_session, rogue_test_session,
+ {'auth_type': BFDAuthType.keyed_md5})
+
+ def test_restart(self):
+ """ simulate remote peer restart and resynchronization """
+ key = self.factory.create_random_key(
+ self, BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id, our_seq_number=0)
+ bfd_session_up(self)
+ # don't send any packets for 2*detection_time
+ detection_time = self.test_session.detect_mult *\
+ self.vpp_session.required_min_rx / USEC_IN_SEC
+ self.sleep(2 * detection_time, "simulating peer restart")
+ events = self.vapi.collect_events()
+ self.assert_equal(len(events), 1, "number of bfd events")
+ verify_event(self, events[0], expected_state=BFDState.down)
+ self.test_session.update(state=BFDState.down)
+ # reset sequence number
+ self.test_session.our_seq_number = 0
+ self.test_session.vpp_seq_number = None
+ # now throw away any pending packets
+ self.pg0.enable_capture()
+ self.test_session.my_discriminator = 0
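+        # with the discriminator and sequence numbers zeroed, the peer looks
+        # like a freshly restarted neighbor and the session is renegotiated
+        # from scratch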
+ bfd_session_up(self)
+
+
+@tag_run_solo
+class BFDAuthOnOffTestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) (changing auth) """
+
+ pg0 = None
+ vpp_session = None
+ test_session = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDAuthOnOffTestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces([0])
+ cls.pg0.config_ip4()
+ cls.pg0.admin_up()
+ cls.pg0.resolve_arp()
+
+ except Exception:
+ super(BFDAuthOnOffTestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDAuthOnOffTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDAuthOnOffTestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+ self.vapi.want_bfd_events()
+ self.pg0.enable_capture()
+
+ def tearDown(self):
+ if not self.vpp_dead:
+ self.vapi.want_bfd_events(enable_disable=False)
+ self.vapi.collect_events() # clear the event queue
+ super(BFDAuthOnOffTestCase, self).tearDown()
+
+ def test_auth_on_immediate(self):
+ """ turn auth on without disturbing session state (immediate) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(self, self.pg0, AF_INET)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.vpp_session.activate_auth(key)
+ self.test_session.bfd_key_id = self.vpp_session.bfd_key_id
+ self.test_session.sha1_key = key
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_auth_off_immediate(self):
+ """ turn auth off without disturbing session state (immediate) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ # self.vapi.want_bfd_events(enable_disable=0)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.inc_seq_num()
+ self.test_session.send_packet()
+ self.vpp_session.deactivate_auth()
+ self.test_session.bfd_key_id = None
+ self.test_session.sha1_key = None
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.inc_seq_num()
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_auth_change_key_immediate(self):
+ """ change auth key without disturbing session state (immediate) """
+ key1 = self.factory.create_random_key(self)
+ key1.add_vpp_config()
+ key2 = self.factory.create_random_key(self)
+ key2.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key1)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key1,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.vpp_session.activate_auth(key2)
+ self.test_session.bfd_key_id = self.vpp_session.bfd_key_id
+ self.test_session.sha1_key = key2
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_auth_on_delayed(self):
+ """ turn auth on without disturbing session state (delayed) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(self, self.pg0, AF_INET)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ wait_for_bfd_packet(self)
+ self.test_session.send_packet()
+ self.vpp_session.activate_auth(key, delayed=True)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.test_session.bfd_key_id = self.vpp_session.bfd_key_id
+ self.test_session.sha1_key = key
+ self.test_session.send_packet()
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_auth_off_delayed(self):
+ """ turn auth off without disturbing session state (delayed) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key)
+ self.vpp_session.add_vpp_config()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.vpp_session.deactivate_auth(delayed=True)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.test_session.bfd_key_id = None
+ self.test_session.sha1_key = None
+ self.test_session.send_packet()
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+ def test_auth_change_key_delayed(self):
+ """ change auth key without disturbing session state (delayed) """
+ key1 = self.factory.create_random_key(self)
+ key1.add_vpp_config()
+ key2 = self.factory.create_random_key(self)
+ key2.add_vpp_config()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip4, sha1_key=key1)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(
+ self, self.pg0, AF_INET, sha1_key=key1,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ bfd_session_up(self)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.vpp_session.activate_auth(key2, delayed=True)
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.test_session.bfd_key_id = self.vpp_session.bfd_key_id
+ self.test_session.sha1_key = key2
+ self.test_session.send_packet()
+ for dummy in range(self.test_session.detect_mult * 2):
+ p = wait_for_bfd_packet(self)
+ self.assert_equal(p[BFD].state, BFDState.up, BFDState)
+ self.test_session.send_packet()
+ self.assert_equal(self.vpp_session.state, BFDState.up, BFDState)
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+
+
+@tag_run_solo
+class BFDCLITestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) (CLI) """
+ pg0 = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDCLITestCase, cls).setUpClass()
+ cls.vapi.cli("set log class bfd level debug")
+ try:
+ cls.create_pg_interfaces((0,))
+ cls.pg0.config_ip4()
+ cls.pg0.config_ip6()
+ cls.pg0.resolve_arp()
+ cls.pg0.resolve_ndp()
+
+ except Exception:
+ super(BFDCLITestCase, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(BFDCLITestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(BFDCLITestCase, self).setUp()
+ self.factory = AuthKeyFactory()
+ self.pg0.enable_capture()
+
+ def tearDown(self):
+ try:
+ self.vapi.want_bfd_events(enable_disable=False)
+ except UnexpectedApiReturnValueError:
+ # some tests aren't subscribed, so this is not an issue
+ pass
+ self.vapi.collect_events() # clear the event queue
+ super(BFDCLITestCase, self).tearDown()
+
+ def cli_verify_no_response(self, cli):
+ """ execute a CLI, asserting that the response is empty """
+ self.assert_equal(self.vapi.cli(cli),
+ "",
+ "CLI command response")
+
+ def cli_verify_response(self, cli, expected):
+ """ execute a CLI, asserting that the response matches expectation """
+ try:
+ reply = self.vapi.cli(cli)
+ except CliFailedCommandError as cli_error:
+ reply = str(cli_error)
+ self.assert_equal(reply.strip(),
+ expected,
+ "CLI command response")
+
+ def test_show(self):
+ """ show commands """
+ k1 = self.factory.create_random_key(self)
+ k1.add_vpp_config()
+ k2 = self.factory.create_random_key(
+ self, auth_type=BFDAuthType.meticulous_keyed_sha1)
+ k2.add_vpp_config()
+ s1 = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ s1.add_vpp_config()
+ s2 = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip6, af=AF_INET6,
+ sha1_key=k2)
+ s2.add_vpp_config()
+ self.logger.info(self.vapi.ppcli("show bfd keys"))
+ self.logger.info(self.vapi.ppcli("show bfd sessions"))
+ self.logger.info(self.vapi.ppcli("show bfd"))
+
+ def test_set_del_sha1_key(self):
+ """ set/delete SHA1 auth key """
+ k = self.factory.create_random_key(self)
+ self.registry.register(k, self.logger)
+ self.cli_verify_no_response(
+ "bfd key set conf-key-id %s type keyed-sha1 secret %s" %
+ (k.conf_key_id,
+ "".join("{:02x}".format(scapy.compat.orb(c)) for c in k.key)))
+ self.assertTrue(k.query_vpp_config())
+ self.vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=k)
+ self.vpp_session.add_vpp_config()
+ self.test_session = \
+ BFDTestSession(self, self.pg0, AF_INET, sha1_key=k,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ self.vapi.want_bfd_events()
+ bfd_session_up(self)
+ bfd_session_down(self)
+ # try to replace the secret for the key - should fail because the key
+ # is in-use
+ k2 = self.factory.create_random_key(self)
+ self.cli_verify_response(
+ "bfd key set conf-key-id %s type keyed-sha1 secret %s" %
+ (k.conf_key_id,
+ "".join("{:02x}".format(scapy.compat.orb(c)) for c in k2.key)),
+ "bfd key set: `bfd_auth_set_key' API call failed, "
+ "rv=-103:BFD object in use")
+ # manipulating the session using old secret should still work
+ bfd_session_up(self)
+ bfd_session_down(self)
+ self.vpp_session.remove_vpp_config()
+ self.cli_verify_no_response(
+ "bfd key del conf-key-id %s" % k.conf_key_id)
+ self.assertFalse(k.query_vpp_config())
+
+ def test_set_del_meticulous_sha1_key(self):
+ """ set/delete meticulous SHA1 auth key """
+ k = self.factory.create_random_key(
+ self, auth_type=BFDAuthType.meticulous_keyed_sha1)
+ self.registry.register(k, self.logger)
+ self.cli_verify_no_response(
+ "bfd key set conf-key-id %s type meticulous-keyed-sha1 secret %s" %
+ (k.conf_key_id,
+ "".join("{:02x}".format(scapy.compat.orb(c)) for c in k.key)))
+ self.assertTrue(k.query_vpp_config())
+ self.vpp_session = VppBFDUDPSession(self, self.pg0,
+ self.pg0.remote_ip6, af=AF_INET6,
+ sha1_key=k)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = \
+ BFDTestSession(self, self.pg0, AF_INET6, sha1_key=k,
+ bfd_key_id=self.vpp_session.bfd_key_id)
+ self.vapi.want_bfd_events()
+ bfd_session_up(self)
+ bfd_session_down(self)
+ # try to replace the secret for the key - should fail because the key
+ # is in-use
+ k2 = self.factory.create_random_key(self)
+ self.cli_verify_response(
+ "bfd key set conf-key-id %s type keyed-sha1 secret %s" %
+ (k.conf_key_id,
+ "".join("{:02x}".format(scapy.compat.orb(c)) for c in k2.key)),
+ "bfd key set: `bfd_auth_set_key' API call failed, "
+ "rv=-103:BFD object in use")
+ # manipulating the session using old secret should still work
+ bfd_session_up(self)
+ bfd_session_down(self)
+ self.vpp_session.remove_vpp_config()
+ self.cli_verify_no_response(
+ "bfd key del conf-key-id %s" % k.conf_key_id)
+ self.assertFalse(k.query_vpp_config())
+
+ def test_add_mod_del_bfd_udp(self):
+ """ create/modify/delete IPv4 BFD UDP session """
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4)
+ self.registry.register(vpp_session, self.logger)
+ cli_add_cmd = "bfd udp session add interface %s local-addr %s " \
+ "peer-addr %s desired-min-tx %s required-min-rx %s "\
+ "detect-mult %s" % (self.pg0.name, self.pg0.local_ip4,
+ self.pg0.remote_ip4,
+ vpp_session.desired_min_tx,
+ vpp_session.required_min_rx,
+ vpp_session.detect_mult)
+ self.cli_verify_no_response(cli_add_cmd)
+ # 2nd add should fail
+ self.cli_verify_response(
+ cli_add_cmd,
+ "bfd udp session add: `bfd_add_add_session' API call"
+ " failed, rv=-101:Duplicate BFD object")
+ verify_bfd_session_config(self, vpp_session)
+ mod_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4,
+ required_min_rx=2 * vpp_session.required_min_rx,
+ desired_min_tx=3 * vpp_session.desired_min_tx,
+ detect_mult=4 * vpp_session.detect_mult)
+ self.cli_verify_no_response(
+ "bfd udp session mod interface %s local-addr %s peer-addr %s "
+ "desired-min-tx %s required-min-rx %s detect-mult %s" %
+ (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4,
+ mod_session.desired_min_tx, mod_session.required_min_rx,
+ mod_session.detect_mult))
+ verify_bfd_session_config(self, mod_session)
+ cli_del_cmd = "bfd udp session del interface %s local-addr %s "\
+ "peer-addr %s" % (self.pg0.name,
+ self.pg0.local_ip4, self.pg0.remote_ip4)
+ self.cli_verify_no_response(cli_del_cmd)
+ # 2nd del is expected to fail
+ self.cli_verify_response(
+ cli_del_cmd, "bfd udp session del: `bfd_udp_del_session' API call"
+ " failed, rv=-102:No such BFD object")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+ def test_add_mod_del_bfd_udp6(self):
+ """ create/modify/delete IPv6 BFD UDP session """
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip6, af=AF_INET6)
+ self.registry.register(vpp_session, self.logger)
+ cli_add_cmd = "bfd udp session add interface %s local-addr %s " \
+ "peer-addr %s desired-min-tx %s required-min-rx %s "\
+ "detect-mult %s" % (self.pg0.name, self.pg0.local_ip6,
+ self.pg0.remote_ip6,
+ vpp_session.desired_min_tx,
+ vpp_session.required_min_rx,
+ vpp_session.detect_mult)
+ self.cli_verify_no_response(cli_add_cmd)
+ # 2nd add should fail
+ self.cli_verify_response(
+ cli_add_cmd,
+ "bfd udp session add: `bfd_add_add_session' API call"
+ " failed, rv=-101:Duplicate BFD object")
+ verify_bfd_session_config(self, vpp_session)
+ mod_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip6, af=AF_INET6,
+ required_min_rx=2 * vpp_session.required_min_rx,
+ desired_min_tx=3 * vpp_session.desired_min_tx,
+ detect_mult=4 * vpp_session.detect_mult)
+ self.cli_verify_no_response(
+ "bfd udp session mod interface %s local-addr %s peer-addr %s "
+ "desired-min-tx %s required-min-rx %s detect-mult %s" %
+ (self.pg0.name, self.pg0.local_ip6, self.pg0.remote_ip6,
+ mod_session.desired_min_tx,
+ mod_session.required_min_rx, mod_session.detect_mult))
+ verify_bfd_session_config(self, mod_session)
+ cli_del_cmd = "bfd udp session del interface %s local-addr %s "\
+ "peer-addr %s" % (self.pg0.name,
+ self.pg0.local_ip6, self.pg0.remote_ip6)
+ self.cli_verify_no_response(cli_del_cmd)
+ # 2nd del is expected to fail
+ self.cli_verify_response(
+ cli_del_cmd,
+ "bfd udp session del: `bfd_udp_del_session' API call"
+ " failed, rv=-102:No such BFD object")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+ def test_add_mod_del_bfd_udp_auth(self):
+ """ create/modify/delete IPv4 BFD UDP session (authenticated) """
+ key = self.factory.create_random_key(self)
+ key.add_vpp_config()
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=key)
+ self.registry.register(vpp_session, self.logger)
+ cli_add_cmd = "bfd udp session add interface %s local-addr %s " \
+ "peer-addr %s desired-min-tx %s required-min-rx %s "\
+ "detect-mult %s conf-key-id %s bfd-key-id %s"\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4,
+ vpp_session.desired_min_tx, vpp_session.required_min_rx,
+ vpp_session.detect_mult, key.conf_key_id,
+ vpp_session.bfd_key_id)
+ self.cli_verify_no_response(cli_add_cmd)
+ # 2nd add should fail
+ self.cli_verify_response(
+ cli_add_cmd,
+ "bfd udp session add: `bfd_add_add_session' API call"
+ " failed, rv=-101:Duplicate BFD object")
+ verify_bfd_session_config(self, vpp_session)
+ mod_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip4, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id,
+ required_min_rx=2 * vpp_session.required_min_rx,
+ desired_min_tx=3 * vpp_session.desired_min_tx,
+ detect_mult=4 * vpp_session.detect_mult)
+ self.cli_verify_no_response(
+ "bfd udp session mod interface %s local-addr %s peer-addr %s "
+ "desired-min-tx %s required-min-rx %s detect-mult %s" %
+ (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4,
+ mod_session.desired_min_tx,
+ mod_session.required_min_rx, mod_session.detect_mult))
+ verify_bfd_session_config(self, mod_session)
+ cli_del_cmd = "bfd udp session del interface %s local-addr %s "\
+ "peer-addr %s" % (self.pg0.name,
+ self.pg0.local_ip4, self.pg0.remote_ip4)
+ self.cli_verify_no_response(cli_del_cmd)
+ # 2nd del is expected to fail
+ self.cli_verify_response(
+ cli_del_cmd,
+ "bfd udp session del: `bfd_udp_del_session' API call"
+ " failed, rv=-102:No such BFD object")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+ def test_add_mod_del_bfd_udp6_auth(self):
+ """ create/modify/delete IPv6 BFD UDP session (authenticated) """
+ key = self.factory.create_random_key(
+ self, auth_type=BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ vpp_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip6, af=AF_INET6, sha1_key=key)
+ self.registry.register(vpp_session, self.logger)
+ cli_add_cmd = "bfd udp session add interface %s local-addr %s " \
+ "peer-addr %s desired-min-tx %s required-min-rx %s "\
+ "detect-mult %s conf-key-id %s bfd-key-id %s" \
+ % (self.pg0.name, self.pg0.local_ip6, self.pg0.remote_ip6,
+ vpp_session.desired_min_tx, vpp_session.required_min_rx,
+ vpp_session.detect_mult, key.conf_key_id,
+ vpp_session.bfd_key_id)
+ self.cli_verify_no_response(cli_add_cmd)
+ # 2nd add should fail
+ self.cli_verify_response(
+ cli_add_cmd,
+ "bfd udp session add: `bfd_add_add_session' API call"
+ " failed, rv=-101:Duplicate BFD object")
+ verify_bfd_session_config(self, vpp_session)
+ mod_session = VppBFDUDPSession(
+ self, self.pg0, self.pg0.remote_ip6, af=AF_INET6, sha1_key=key,
+ bfd_key_id=vpp_session.bfd_key_id,
+ required_min_rx=2 * vpp_session.required_min_rx,
+ desired_min_tx=3 * vpp_session.desired_min_tx,
+ detect_mult=4 * vpp_session.detect_mult)
+ self.cli_verify_no_response(
+ "bfd udp session mod interface %s local-addr %s peer-addr %s "
+ "desired-min-tx %s required-min-rx %s detect-mult %s" %
+ (self.pg0.name, self.pg0.local_ip6, self.pg0.remote_ip6,
+ mod_session.desired_min_tx,
+ mod_session.required_min_rx, mod_session.detect_mult))
+ verify_bfd_session_config(self, mod_session)
+ cli_del_cmd = "bfd udp session del interface %s local-addr %s "\
+ "peer-addr %s" % (self.pg0.name,
+ self.pg0.local_ip6, self.pg0.remote_ip6)
+ self.cli_verify_no_response(cli_del_cmd)
+ # 2nd del is expected to fail
+ self.cli_verify_response(
+ cli_del_cmd,
+ "bfd udp session del: `bfd_udp_del_session' API call"
+ " failed, rv=-102:No such BFD object")
+ self.assertFalse(vpp_session.query_vpp_config())
+
+ def test_auth_on_off(self):
+ """ turn authentication on and off """
+ key = self.factory.create_random_key(
+ self, auth_type=BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ auth_session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key)
+ session.add_vpp_config()
+ cli_activate = \
+ "bfd udp session auth activate interface %s local-addr %s "\
+ "peer-addr %s conf-key-id %s bfd-key-id %s"\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4,
+ key.conf_key_id, auth_session.bfd_key_id)
+ self.cli_verify_no_response(cli_activate)
+ verify_bfd_session_config(self, auth_session)
+ self.cli_verify_no_response(cli_activate)
+ verify_bfd_session_config(self, auth_session)
+ cli_deactivate = \
+ "bfd udp session auth deactivate interface %s local-addr %s "\
+ "peer-addr %s "\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4)
+ self.cli_verify_no_response(cli_deactivate)
+ verify_bfd_session_config(self, session)
+ self.cli_verify_no_response(cli_deactivate)
+ verify_bfd_session_config(self, session)
+
+ def test_auth_on_off_delayed(self):
+ """ turn authentication on and off (delayed) """
+ key = self.factory.create_random_key(
+ self, auth_type=BFDAuthType.meticulous_keyed_sha1)
+ key.add_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ auth_session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
+ sha1_key=key)
+ session.add_vpp_config()
+ cli_activate = \
+ "bfd udp session auth activate interface %s local-addr %s "\
+ "peer-addr %s conf-key-id %s bfd-key-id %s delayed yes"\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4,
+ key.conf_key_id, auth_session.bfd_key_id)
+ self.cli_verify_no_response(cli_activate)
+ verify_bfd_session_config(self, auth_session)
+ self.cli_verify_no_response(cli_activate)
+ verify_bfd_session_config(self, auth_session)
+ cli_deactivate = \
+ "bfd udp session auth deactivate interface %s local-addr %s "\
+ "peer-addr %s delayed yes"\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4)
+ self.cli_verify_no_response(cli_deactivate)
+ verify_bfd_session_config(self, session)
+ self.cli_verify_no_response(cli_deactivate)
+ verify_bfd_session_config(self, session)
+
+ def test_admin_up_down(self):
+ """ put session admin-up and admin-down """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ cli_down = \
+ "bfd udp session set-flags admin down interface %s local-addr %s "\
+ "peer-addr %s "\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4)
+ cli_up = \
+ "bfd udp session set-flags admin up interface %s local-addr %s "\
+ "peer-addr %s "\
+ % (self.pg0.name, self.pg0.local_ip4, self.pg0.remote_ip4)
+ self.cli_verify_no_response(cli_down)
+ verify_bfd_session_config(self, session, state=BFDState.admin_down)
+ self.cli_verify_no_response(cli_up)
+ verify_bfd_session_config(self, session, state=BFDState.down)
+
+ def test_set_del_udp_echo_source(self):
+ """ set/del udp echo source """
+ self.create_loopback_interfaces(1)
+ self.loopback0 = self.lo_interfaces[0]
+ self.loopback0.admin_up()
+ self.cli_verify_response("show bfd echo-source",
+ "UDP echo source is not set.")
+ cli_set = "bfd udp echo-source set interface %s" % self.loopback0.name
+ self.cli_verify_no_response(cli_set)
+ self.cli_verify_response("show bfd echo-source",
+ "UDP echo source is: %s\n"
+ "IPv4 address usable as echo source: none\n"
+ "IPv6 address usable as echo source: none" %
+ self.loopback0.name)
+ self.loopback0.config_ip4()
+ echo_ip4 = str(ipaddress.IPv4Address(int(ipaddress.IPv4Address(
+ self.loopback0.local_ip4)) ^ 1))
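+        # the usable echo source is the interface address with its least
+        # significant bit flipped, e.g. an address ending in .10 yields .11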
+ self.cli_verify_response("show bfd echo-source",
+ "UDP echo source is: %s\n"
+ "IPv4 address usable as echo source: %s\n"
+ "IPv6 address usable as echo source: none" %
+ (self.loopback0.name, echo_ip4))
+ echo_ip6 = str(ipaddress.IPv6Address(int(ipaddress.IPv6Address(
+ self.loopback0.local_ip6)) ^ 1))
+ self.loopback0.config_ip6()
+ self.cli_verify_response("show bfd echo-source",
+ "UDP echo source is: %s\n"
+ "IPv4 address usable as echo source: %s\n"
+ "IPv6 address usable as echo source: %s" %
+ (self.loopback0.name, echo_ip4, echo_ip6))
+ cli_del = "bfd udp echo-source del"
+ self.cli_verify_no_response(cli_del)
+ self.cli_verify_response("show bfd echo-source",
+ "UDP echo source is not set.")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_bier.py b/test/test_bier.py
new file mode 100644
index 00000000000..2f649bbde53
--- /dev/null
+++ b/test/test_bier.py
@@ -0,0 +1,862 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, \
+ VppMplsTable, VppIpMRoute, VppMRoutePath, VppIpTable, \
+ MPLS_LABEL_INVALID, \
+ VppMplsLabel, FibPathProto, FibPathType
+from vpp_bier import BIER_HDR_PAYLOAD, VppBierImp, VppBierDispEntry, \
+ VppBierDispTable, VppBierTable, VppBierTableID, VppBierRoute
+from vpp_udp_encap import VppUdpEncap
+from vpp_papi import VppEnum
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.mpls import MPLS
+from scapy.contrib.bier import BIER, BIERLength, BIFT
+
+NUM_PKTS = 67
+
+
+class TestBFIB(VppTestCase):
+ """ BIER FIB Test Case """
+
+ def test_bfib(self):
+ """ BFIB Unit Tests """
+ error = self.vapi.cli("test bier")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("Failed", error)
+
+
+class TestBier(VppTestCase):
+ """ BIER Test Case """
+
+ def setUp(self):
+ super(TestBier, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ # create the default MPLS table
+ self.tables = []
+ tbl = VppMplsTable(self, 0)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ tbl = VppIpTable(self, 10)
+ tbl.add_vpp_config()
+ self.tables.append(tbl)
+
+ # setup both interfaces
+ for i in self.pg_interfaces:
+ if i == self.pg2:
+ i.set_table_ip4(10)
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.enable_mpls()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.disable_mpls()
+ i.unconfig_ip4()
+ i.set_table_ip4(0)
+ i.admin_down()
+ super(TestBier, self).tearDown()
+
+ def bier_midpoint(self, hdr_len_id, n_bytes, max_bp):
+ """BIER midpoint"""
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and the given BSL
+ #
+ bti = VppBierTableID(0, 0, hdr_len_id)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # A packet with no bits set gets dropped
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=hdr_len_id) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ pkts = [p]
+
+ self.send_and_assert_no_replies(self.pg0, pkts,
+ "Empty Bit-String")
+
+ #
+ # Add a BIER route for each bit-position in the table via a different
+        # next-hop. This tests whether the BIER walk and replicate forwarding
+        # function works for all bit positions.
+ #
+ nh_routes = []
+ bier_routes = []
+ for i in range(1, max_bp+1):
+ nh = "10.0.%d.%d" % (i / 255, i % 255)
+ nh_routes.append(
+ VppIpRoute(self, nh, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2000+i)])]))
+ nh_routes[-1].add_vpp_config()
+
+ bier_routes.append(
+ VppBierRoute(self, bti, i,
+ [VppRoutePath(nh, 0xffffffff,
+ labels=[VppMplsLabel(100+i)])]))
+ bier_routes[-1].add_vpp_config()
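+            # the IGP label (2000 + i) and the BIER label (100 + i) both
+            # encode the bit-position, so the capture checks below can
+            # recover the BP from the labels on each replicated copy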
+
+ #
+ # A packet with all bits set gets replicated once for each bit
+ #
+ pkt_sizes = [64, 1400]
+
+ for pkt_size in pkt_sizes:
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=hdr_len_id,
+ BitString=scapy.compat.chb(255)*n_bytes) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * pkt_size))
+ pkts = p
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(max_bp)
+
+ for rxp in rx:
+ #
+                # The packets are not required to arrive in bit-position
+                # order. When we set up the routes above we used the
+                # bit-position to construct the out-label, so use that here
+                # to determine the BP
+ #
+ olabel = rxp[MPLS]
+ bp = olabel.label - 2000
+
+ blabel = olabel[MPLS].payload
+ self.assertEqual(blabel.label, 100+bp)
+ self.assertEqual(blabel.ttl, 254)
+
+ bier_hdr = blabel[MPLS].payload
+
+ self.assertEqual(bier_hdr.id, 5)
+ self.assertEqual(bier_hdr.version, 0)
+ self.assertEqual(bier_hdr.length, hdr_len_id)
+ self.assertEqual(bier_hdr.entropy, 0)
+ self.assertEqual(bier_hdr.OAM, 0)
+ self.assertEqual(bier_hdr.RSV, 0)
+ self.assertEqual(bier_hdr.DSCP, 0)
+ self.assertEqual(bier_hdr.Proto, 5)
+
+ # The bit-string should consist only of the BP given by i.
+ byte_array = [b'\0'] * (n_bytes)
+ byte_val = scapy.compat.chb(1 << (bp - 1) % 8)
+ byte_pos = n_bytes - (((bp - 1) // 8) + 1)
+ byte_array[byte_pos] = byte_val
+ bitstring = b''.join(byte_array)
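+                # e.g. bp=10: byte_val is 1 << ((10 - 1) % 8) = 0x02 and
+                # byte_pos is n_bytes - 2, i.e. the second-to-last byte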
+
+ self.assertEqual(len(bitstring), len(bier_hdr.BitString))
+ self.assertEqual(bitstring, bier_hdr.BitString)
+
+ #
+ # cleanup. not strictly necessary, but it's much quicker this way
+ # because the bier_fib_dump and ip_fib_dump will be empty when the
+ # auto-cleanup kicks in
+ #
+ for br in bier_routes:
+ br.remove_vpp_config()
+ for nhr in nh_routes:
+ nhr.remove_vpp_config()
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_1024(self):
+ """BIER midpoint BSL:1024"""
+ self.bier_midpoint(BIERLength.BIER_LEN_1024, 128, 1024)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_512(self):
+ """BIER midpoint BSL:512"""
+ self.bier_midpoint(BIERLength.BIER_LEN_512, 64, 512)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_256(self):
+ """BIER midpoint BSL:256"""
+ self.bier_midpoint(BIERLength.BIER_LEN_256, 32, 256)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_midpoint_128(self):
+ """BIER midpoint BSL:128"""
+ self.bier_midpoint(BIERLength.BIER_LEN_128, 16, 128)
+
+ def test_bier_midpoint_64(self):
+ """BIER midpoint BSL:64"""
+ self.bier_midpoint(BIERLength.BIER_LEN_64, 8, 64)
+
+ def test_bier_load_balance(self):
+ """BIER load-balance"""
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and BSL 64
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_64)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # packets with varying entropy
+ #
+ pkts = []
+ for ii in range(257):
+ pkts.append((Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_64,
+ entropy=ii,
+ BitString=scapy.compat.chb(255)*16) /
+ IPv6(src=self.pg0.remote_ip6,
+ dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw()))
+
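+        # the entropy field is the only varying input, so it alone spreads
+        # these packets across the configured load-balance paths
+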
+ #
+ # 4 next hops
+ #
+ nhs = [{'ip': "10.0.0.1", 'label': 201},
+ {'ip': "10.0.0.2", 'label': 202},
+ {'ip': "10.0.0.3", 'label': 203},
+ {'ip': "10.0.0.4", 'label': 204}]
+
+ for nh in nhs:
+ ipr = VppIpRoute(
+ self, nh['ip'], 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(nh['label'])])])
+ ipr.add_vpp_config()
+
+ bier_route = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+ bier_route.add_vpp_config()
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+
+ #
+        # we should have received a packet from each neighbor
+ #
+ for nh in nhs[:2]:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+ # add the other paths
+ #
+ bier_route.update_paths(
+ [VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[2]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]),
+ VppRoutePath(nhs[3]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+
+ for nh in nhs:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+ # remove first two paths
+ #
+ bier_route.remove_path(VppRoutePath(nhs[0]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]))
+ bier_route.remove_path(VppRoutePath(nhs[1]['ip'], 0xffffffff,
+ labels=[VppMplsLabel(101)]))
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1)
+ for nh in nhs[2:]:
+ self.assertTrue(sum(p[MPLS].label == nh['label'] for p in rx))
+
+ #
+        # remove the last of the paths, deleting the entry
+ #
+ bier_route.remove_all_paths()
+
+ self.send_and_assert_no_replies(self.pg0, pkts)
+
+ def test_bier_head(self):
+ """BIER head"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # 2 bit positions via two next hops
+ #
+ nh1 = "10.0.0.1"
+ nh2 = "10.0.0.2"
+ ip_route_1 = VppIpRoute(self, nh1, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2001)])])
+ ip_route_2 = VppIpRoute(self, nh2, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2002)])])
+ ip_route_1.add_vpp_config()
+ ip_route_2.add_vpp_config()
+
+ bier_route_1 = VppBierRoute(self, bti, 1,
+ [VppRoutePath(nh1, 0xffffffff,
+ labels=[VppMplsLabel(101)])])
+ bier_route_2 = VppBierRoute(self, bti, 2,
+ [VppRoutePath(nh2, 0xffffffff,
+ labels=[VppMplsLabel(102)])])
+ bier_route_1.add_vpp_config()
+ bier_route_2.add_vpp_config()
+
+ #
+ # An imposition object with both bit-positions set
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0x3) * 32)
+ bi.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+ # inject an IP packet. We expect it to be BIER encapped and
+ # replicated.
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(2)
+
+ #
+        # Encap stack is: eth, MPLS, MPLS, BIER
+ #
+ igp_mpls = rx[0][MPLS]
+ self.assertEqual(igp_mpls.label, 2001)
+ self.assertEqual(igp_mpls.ttl, 64)
+ self.assertEqual(igp_mpls.s, 0)
+ bier_mpls = igp_mpls[MPLS].payload
+ self.assertEqual(bier_mpls.label, 101)
+ self.assertEqual(bier_mpls.ttl, 64)
+ self.assertEqual(bier_mpls.s, 1)
+ self.assertEqual(rx[0][BIER].length, 2)
+
+ igp_mpls = rx[1][MPLS]
+ self.assertEqual(igp_mpls.label, 2002)
+ self.assertEqual(igp_mpls.ttl, 64)
+ self.assertEqual(igp_mpls.s, 0)
+ bier_mpls = igp_mpls[MPLS].payload
+ self.assertEqual(bier_mpls.label, 102)
+ self.assertEqual(bier_mpls.ttl, 64)
+ self.assertEqual(bier_mpls.s, 1)
+ self.assertEqual(rx[0][BIER].length, 2)
+
+ def test_bier_tail(self):
+ """BIER Tail"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 0, set 0, and BSL 256
+ #
+ bti = VppBierTableID(0, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+ # disposition table
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # A multicast route to forward post BIER disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # A packet with all bits set gets spat out to BP:1
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=99) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ #
+ # A packet that does not match the Disposition entry gets dropped
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=77) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ self.send_and_assert_no_replies(self.pg0, p*2,
+ "no matching disposition entry")
+
+ #
+ # Add the default route to the disposition table
+ #
+ bier_de_2 = VppBierDispEntry(self, bdt.id, 0,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_2.add_vpp_config()
+
+ #
+ # now the previous packet is forwarded
+ #
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ #
+ # A multicast route to forward post BIER disposition that needs
+ # a check against sending back into the BIER core
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0x3) * 32)
+ bi.add_vpp_config()
+
+ route_eg_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=DpoProto.DPO_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi.bi_index),
+ VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_2.add_vpp_config()
+ route_eg_232_1_1_2.update_rpf_id(8192)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ MPLS(label=77, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=77) /
+ IP(src="1.1.1.1", dst="232.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+ self.send_and_expect(self.pg0, [p], self.pg1)
+
+ def bier_e2e(self, hdr_len_id, n_bytes, max_bp):
+ """ BIER end-to-end"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+        # Add a BIER table for sub-domain 0, set 0, and the given BSL
+ #
+ bti = VppBierTableID(0, 0, hdr_len_id)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ lowest = [b'\0'] * (n_bytes)
+ lowest[-1] = scapy.compat.chb(1)
+ highest = [b'\0'] * (n_bytes)
+ highest[0] = scapy.compat.chb(128)
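+        # 'lowest' sets only BP 1 (LSB of the last byte); 'highest' sets only
+        # BP max_bp (MSB of the first byte)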
+
+ #
+        # Imposition objects with the lowest and highest bit strings
+ #
+ bi_low = VppBierImp(self, bti, 333, lowest)
+ bi_low.add_vpp_config()
+ bi_high = VppBierImp(self, bti, 334, highest)
+ bi_high.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi_low.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+ route_ing_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi_high.bi_index)])
+ route_ing_232_1_1_2.add_vpp_config()
+
+ #
+ # disposition table 8
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER routes in table that are for-us, resolving through
+ # disp table 8.
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+ bier_route_max = VppBierRoute(
+ self, bti, max_bp,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_max.add_vpp_config()
+
+ #
+ # An entry in the disposition table for sender 333
+ # lookup in VRF 10
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 333,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 10, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 334,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 10, rpf_id=8193)
+ bier_de_1.add_vpp_config()
+
+ #
+ # Add a multicast routes that will forward the traffic
+ # post-disposition
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ table_id=10,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+ route_eg_232_1_1_2 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.2", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ table_id=10,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_2.add_vpp_config()
+ route_eg_232_1_1_2.update_rpf_id(8193)
+
+ #
+ # inject a packet in VRF-0. We expect it to be BIER encapped,
+ # replicated, then hit the disposition and be forwarded
+ # out of VRF 10, i.e. on pg1
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * 32))
+
+ rx = self.send_and_expect(self.pg0, p*NUM_PKTS, self.pg1)
+
+ self.assertEqual(rx[0][IP].src, "1.1.1.1")
+ self.assertEqual(rx[0][IP].dst, "232.1.1.1")
+
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(scapy.compat.chb(5) * 512))
+
+ rx = self.send_and_expect(self.pg0, p*NUM_PKTS, self.pg1)
+ self.assertEqual(rx[0][IP].src, "1.1.1.1")
+ self.assertEqual(rx[0][IP].dst, "232.1.1.2")
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_1024(self):
+ """ BIER end-to-end BSL:1024"""
+ self.bier_e2e(BIERLength.BIER_LEN_1024, 128, 1024)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_512(self):
+ """ BIER end-to-end BSL:512"""
+ self.bier_e2e(BIERLength.BIER_LEN_512, 64, 512)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_256(self):
+ """ BIER end-to-end BSL:256"""
+ self.bier_e2e(BIERLength.BIER_LEN_256, 32, 256)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_bier_e2e_128(self):
+ """ BIER end-to-end BSL:128"""
+ self.bier_e2e(BIERLength.BIER_LEN_128, 16, 128)
+
+ def test_bier_e2e_64(self):
+ """ BIER end-to-end BSL:64"""
+ self.bier_e2e(BIERLength.BIER_LEN_64, 8, 64)
+
+ def test_bier_head_o_udp(self):
+ """BIER head over UDP"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+ # Add a BIER table for sub-domain 1, set 0, and BSL 256
+ #
+ bti = VppBierTableID(1, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, 77)
+ bt.add_vpp_config()
+
+ #
+        # 1 bit position via 1 next hop
+ #
+ nh1 = "10.0.0.1"
+ ip_route = VppIpRoute(self, nh1, 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ labels=[VppMplsLabel(2001)])])
+ ip_route.add_vpp_config()
+
+ udp_encap = VppUdpEncap(self,
+ self.pg0.local_ip4,
+ nh1,
+ 330, 8138)
+ udp_encap.add_vpp_config()
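+        # BIER packets towards nh1 will be encapsulated in UDP with source
+        # port 330 and destination port 8138, as checked on the capture below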
+
+ bier_route = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xFFFFFFFF,
+ type=FibPathType.FIB_PATH_TYPE_UDP_ENCAP,
+ next_hop_id=udp_encap.id)])
+ bier_route.add_vpp_config()
+
+ #
+        # 2 imposition objects with all bit-positions set.
+        # Only the second is used; creating two ensures the route add
+        # is exercised with a non-zero imposition index
+ #
+ bi = VppBierImp(self, bti, 333, scapy.compat.chb(0xff) * 32)
+ bi.add_vpp_config()
+ bi2 = VppBierImp(self, bti, 334, scapy.compat.chb(0xff) * 32)
+ bi2.add_vpp_config()
+
+ #
+        # Add a multicast route that will forward into the BIER domain
+ #
+ route_ing_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg0.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_ACCEPT),
+ VppMRoutePath(0xffffffff,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ type=FibPathType.FIB_PATH_TYPE_BIER_IMP,
+ bier_imp=bi2.bi_index)])
+ route_ing_232_1_1_1.add_vpp_config()
+
+ #
+        # inject an IP packet. We expect it to be BIER and UDP encapped.
+ #
+ p = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234))
+
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ #
+        # Encap stack is: eth, IP, UDP, BIFT, BIER
+ #
+ self.assertEqual(rx[0][IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[0][IP].dst, nh1)
+ self.assertEqual(rx[0][UDP].sport, 330)
+ self.assertEqual(rx[0][UDP].dport, 8138)
+ self.assertEqual(rx[0][BIFT].bsl, BIERLength.BIER_LEN_256)
+ self.assertEqual(rx[0][BIFT].sd, 1)
+ self.assertEqual(rx[0][BIFT].set, 0)
+ self.assertEqual(rx[0][BIFT].ttl, 64)
+ self.assertEqual(rx[0][BIER].length, 2)
+
+ def test_bier_tail_o_udp(self):
+ """BIER Tail over UDP"""
+
+ MRouteItfFlags = VppEnum.vl_api_mfib_itf_flags_t
+ MRouteEntryFlags = VppEnum.vl_api_mfib_entry_flags_t
+
+ #
+        # Add a BIER table for sub-domain 1, set 0, and BSL 256
+ #
+ bti = VppBierTableID(1, 0, BIERLength.BIER_LEN_256)
+ bt = VppBierTable(self, bti, MPLS_LABEL_INVALID)
+ bt.add_vpp_config()
+
+ #
+ # disposition table
+ #
+ bdt = VppBierDispTable(self, 8)
+ bdt.add_vpp_config()
+
+ #
+ # BIER route in table that's for-us
+ #
+ bier_route_1 = VppBierRoute(
+ self, bti, 1,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ nh_table_id=8)])
+ bier_route_1.add_vpp_config()
+
+ #
+ # An entry in the disposition table
+ #
+ bier_de_1 = VppBierDispEntry(self, bdt.id, 99,
+ BIER_HDR_PAYLOAD.BIER_HDR_PROTO_IPV4,
+ FibPathProto.FIB_PATH_NH_PROTO_BIER,
+ "0.0.0.0", 0, rpf_id=8192)
+ bier_de_1.add_vpp_config()
+
+ #
+ # A multicast route to forward post BIER disposition
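+        # (its rpf_id is set to 8192 below to match the disposition entry)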
+ #
+ route_eg_232_1_1_1 = VppIpMRoute(
+ self,
+ "0.0.0.0",
+ "232.1.1.1", 32,
+ MRouteEntryFlags.MFIB_API_ENTRY_FLAG_NONE,
+ paths=[VppMRoutePath(self.pg1.sw_if_index,
+ MRouteItfFlags.MFIB_API_ITF_FLAG_FORWARD)])
+ route_eg_232_1_1_1.add_vpp_config()
+ route_eg_232_1_1_1.update_rpf_id(8192)
+
+ #
+ # A packet with all bits set gets spat out to BP:1
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=333, dport=8138) /
+ BIFT(sd=1, set=0, bsl=2, ttl=255) /
+ BIER(length=BIERLength.BIER_LEN_256,
+ BitString=scapy.compat.chb(255)*32,
+ BFRID=99) /
+ IP(src="1.1.1.1", dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ rx = self.send_and_expect(self.pg0, [p], self.pg1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_bihash.py b/test/test_bihash.py
new file mode 100644
index 00000000000..2949d66750d
--- /dev/null
+++ b/test/test_bihash.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_gcov_tests
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+
+
+class TestBihash(VppTestCase):
+ """ Bihash Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ # increase vapi timeout, to avoid spurious "test bihash ..."
+ # failures reported on aarch64 w/ test-debug
+ cls.vapi_response_timeout = 20
+ super(TestBihash, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestBihash, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestBihash, self).setUp()
+
+ def tearDown(self):
+ super(TestBihash, self).tearDown()
+
+ def test_bihash_unittest(self):
+ """ Bihash Add/Del Test """
+ error = self.vapi.cli("test bihash careful 0 verbose 0")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+ def test_bihash_thread(self):
+ """ Bihash Thread Test """
+
+ error = self.vapi.cli("test bihash threads 2 nbuckets" +
+ " 64000 careful 0 verbose 0")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+ def test_bihash_vec64(self):
+ """ Bihash vec64 Test """
+
+ error = self.vapi.cli("test bihash vec64")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+ @unittest.skipUnless(running_gcov_tests, "part of code coverage tests")
+ def test_bihash_coverage(self):
+ """ Improve Code Coverage """
+
+ error = self.vapi.cli("test bihash nitems 10 ncycles 3" +
+ "search 2 careful 1 verbose 2 non-random-keys")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+ error = self.vapi.cli("test bihash nitems 10 nbuckets 1 ncycles 3" +
+ "search 2 careful 1 verbose 2 non-random-keys")
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_bond.py b/test/test_bond.py
new file mode 100644
index 00000000000..5df86ae5b0f
--- /dev/null
+++ b/test/test_bond.py
@@ -0,0 +1,321 @@
+#!/usr/bin/env python3
+
+import socket
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from vpp_bond_interface import VppBondInterface
+from vpp_papi import MACAddress, VppEnum
+
+
+class TestBondInterface(VppTestCase):
+ """Bond Test Case
+
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestBondInterface, cls).setUpClass()
+ # Test variables
+ cls.pkts_per_burst = 257 # Number of packets per burst
+        # create 4 pg interfaces
+ cls.create_pg_interfaces(range(4))
+
+ # packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518] # , 9018]
+
+ # setup all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestBondInterface, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestBondInterface, self).setUp()
+
+ def tearDown(self):
+ super(TestBondInterface, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.ppcli("show interface"))
+
+ def test_bond_traffic(self):
+ """ Bond traffic test """
+
+ # topology
+ #
+ # RX-> TX->
+ #
+ # pg2 ------+ +------pg0 (member)
+ # | |
+ # BondEthernet0 (10.10.10.1)
+ # | |
+        #   pg3 ------+      +------pg1 (member)
+ #
+
+ # create interface (BondEthernet0)
+ # self.logger.info("create bond")
+ bond0_mac = "02:fe:38:30:59:3c"
+ mac = MACAddress(bond0_mac).packed
+ bond0 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_XOR,
+ lb=VppEnum.vl_api_bond_lb_algo_t.BOND_API_LB_ALGO_L34,
+ numa_only=0,
+ use_custom_mac=1,
+ mac_address=mac)
+ bond0.add_vpp_config()
+ bond0.admin_up()
+ self.vapi.sw_interface_add_del_address(
+ sw_if_index=bond0.sw_if_index,
+ prefix="10.10.10.1/24")
+
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+
+ self.logger.info(self.vapi.cli("show interface"))
+ self.logger.info(self.vapi.cli("show interface address"))
+ self.logger.info(self.vapi.cli("show ip neighbors"))
+
+ # add member pg0 and pg1 to BondEthernet0
+ self.logger.info("bond add member interface pg0 to BondEthernet0")
+ bond0.add_member_vpp_bond_interface(sw_if_index=self.pg0.sw_if_index)
+ self.logger.info("bond add_member interface pg1 to BondEthernet0")
+ bond0.add_member_vpp_bond_interface(sw_if_index=self.pg1.sw_if_index)
+
+ # verify both members in BondEthernet0
+ if_dump = self.vapi.sw_member_interface_dump(bond0.sw_if_index)
+ self.assertTrue(self.pg0.is_interface_config_in_dump(if_dump))
+ self.assertTrue(self.pg1.is_interface_config_in_dump(if_dump))
+
+ # generate a packet from pg2 -> BondEthernet0 -> pg1
+ # BondEthernet0 TX hashes this packet to pg1
+ p2 = (Ether(src=bond0_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.local_ip4, dst="10.10.10.12") /
+ UDP(sport=1235, dport=1235) /
+ Raw(b'\xa5' * 100))
+ self.pg2.add_stream(p2)
+
+ # generate a packet from pg3 -> BondEthernet0 -> pg0
+ # BondEthernet0 TX hashes this packet to pg0
+        # note the IP address and ports differ from those in the p2 packet
+ p3 = (Ether(src=bond0_mac, dst=self.pg3.local_mac) /
+ IP(src=self.pg3.local_ip4, dst="10.10.10.11") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.pg3.add_stream(p3)
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # set up the static arp entries pointing to the BondEthernet0 interface
+ # so that it does not try to resolve the ip address
+ self.logger.info(self.vapi.cli(
+ "set ip neighbor static BondEthernet0 10.10.10.12 abcd.abcd.0002"))
+ self.logger.info(self.vapi.cli(
+ "set ip neighbor static BondEthernet0 10.10.10.11 abcd.abcd.0004"))
+
+ # clear the interface counters
+ self.logger.info(self.vapi.cli("clear interfaces"))
+
+ self.pg_start()
+
+ self.logger.info("check the interface counters")
+
+ # verify counters
+
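+        # each packet is 14 (Ether) + 20 (IP) + 8 (UDP) + 100 (payload)
+        # = 142 bytes: the bond transmits both, hence 284 tx bytes, and
+        # pg2/pg3 each injected a single 142 byte packet
+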
+ # BondEthernet0 tx bytes = 284
+ intfs = self.vapi.cli("show interface BondEthernet0").split("\n")
+ found = 0
+ for intf in intfs:
+ if "tx bytes" in intf and "284" in intf:
+ found = 1
+ self.assertEqual(found, 1)
+
+ # pg2 rx bytes = 142
+ intfs = self.vapi.cli("show interface pg2").split("\n")
+ found = 0
+ for intf in intfs:
+ if "rx bytes" in intf and "142" in intf:
+ found = 1
+ self.assertEqual(found, 1)
+
+ # pg3 rx bytes = 142
+ intfs = self.vapi.cli("show interface pg3").split("\n")
+ found = 0
+ for intf in intfs:
+ if "rx bytes" in intf and "142" in intf:
+ found = 1
+ self.assertEqual(found, 1)
+
+ bond0.remove_vpp_config()
+
+ def test_bond_add_member(self):
+ """ Bond add_member/detach member test """
+
+ # create interface (BondEthernet0) and set bond mode to LACP
+ self.logger.info("create bond")
+ bond0 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP,
+ enable_gso=0)
+ bond0.add_vpp_config()
+ bond0.admin_up()
+
+        # verify that interfaces can be added as members and detached twice
+ for i in range(2):
+ # verify pg0 and pg1 not in BondEthernet0
+ if_dump = self.vapi.sw_member_interface_dump(bond0.sw_if_index)
+ self.assertFalse(self.pg0.is_interface_config_in_dump(if_dump))
+ self.assertFalse(self.pg1.is_interface_config_in_dump(if_dump))
+
+ # add_member pg0 and pg1 to BondEthernet0
+ self.logger.info("bond add_member interface pg0 to BondEthernet0")
+ bond0.add_member_vpp_bond_interface(
+ sw_if_index=self.pg0.sw_if_index,
+ is_passive=0,
+ is_long_timeout=0)
+
+ self.logger.info("bond add_member interface pg1 to BondEthernet0")
+ bond0.add_member_vpp_bond_interface(
+ sw_if_index=self.pg1.sw_if_index,
+ is_passive=0,
+ is_long_timeout=0)
+ # verify both members in BondEthernet0
+ if_dump = self.vapi.sw_member_interface_dump(bond0.sw_if_index)
+ self.assertTrue(self.pg0.is_interface_config_in_dump(if_dump))
+ self.assertTrue(self.pg1.is_interface_config_in_dump(if_dump))
+
+ # detach interface pg0
+ self.logger.info("detach interface pg0")
+ bond0.detach_vpp_bond_interface(sw_if_index=self.pg0.sw_if_index)
+
+ # verify pg0 is not in BondEthernet0, but pg1 is
+ if_dump = self.vapi.sw_member_interface_dump(bond0.sw_if_index)
+ self.assertFalse(self.pg0.is_interface_config_in_dump(if_dump))
+ self.assertTrue(self.pg1.is_interface_config_in_dump(if_dump))
+
+ # detach interface pg1
+ self.logger.info("detach interface pg1")
+ bond0.detach_vpp_bond_interface(sw_if_index=self.pg1.sw_if_index)
+
+ # verify pg0 and pg1 not in BondEthernet0
+ if_dump = self.vapi.sw_member_interface_dump(bond0.sw_if_index)
+ self.assertFalse(self.pg0.is_interface_config_in_dump(if_dump))
+ self.assertFalse(self.pg1.is_interface_config_in_dump(if_dump))
+
+ bond0.remove_vpp_config()
+
+ def test_bond(self):
+ """ Bond add/delete interface test """
+ self.logger.info("Bond add interfaces")
+
+ # create interface 1 (BondEthernet0)
+ bond0 = VppBondInterface(
+ self, mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP)
+ bond0.add_vpp_config()
+ bond0.admin_up()
+
+ # create interface 2 (BondEthernet1)
+ bond1 = VppBondInterface(
+ self, mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_XOR)
+ bond1.add_vpp_config()
+ bond1.admin_up()
+
+ # verify both interfaces in the show
+ ifs = self.vapi.cli("show interface")
+ self.assertIn('BondEthernet0', ifs)
+ self.assertIn('BondEthernet1', ifs)
+
+ # verify they are in the dump also
+ if_dump = self.vapi.sw_bond_interface_dump(sw_if_index=0xFFFFFFFF)
+ self.assertTrue(bond0.is_interface_config_in_dump(if_dump))
+ self.assertTrue(bond1.is_interface_config_in_dump(if_dump))
+
+ # delete BondEthernet1
+ self.logger.info("Deleting BondEthernet1")
+ bond1.remove_vpp_config()
+
+ self.logger.info("Verifying BondEthernet1 is deleted")
+
+ ifs = self.vapi.cli("show interface")
+ # verify BondEthernet0 still in the show
+ self.assertIn('BondEthernet0', ifs)
+
+ # verify BondEthernet1 not in the show
+ self.assertNotIn('BondEthernet1', ifs)
+
+ # verify BondEthernet1 is not in the dump
+ if_dump = self.vapi.sw_bond_interface_dump(sw_if_index=0xFFFFFFFF)
+ self.assertFalse(bond1.is_interface_config_in_dump(if_dump))
+
+ # verify BondEthernet0 is still in the dump
+ self.assertTrue(bond0.is_interface_config_in_dump(if_dump))
+
+ # delete BondEthernet0
+ self.logger.info("Deleting BondEthernet0")
+ bond0.remove_vpp_config()
+
+ self.logger.info("Verifying BondEthernet0 is deleted")
+
+ # verify BondEthernet0 not in the show
+ ifs = self.vapi.cli("show interface")
+ self.assertNotIn('BondEthernet0', ifs)
+
+ # verify BondEthernet0 is not in the dump
+ if_dump = self.vapi.sw_bond_interface_dump(
+ sw_if_index=bond0.sw_if_index)
+ self.assertFalse(bond0.is_interface_config_in_dump(if_dump))
+
+ def test_bond_link(self):
+ """ Bond hw interface link state test """
+
+ # for convenience
+ bond_modes = VppEnum.vl_api_bond_mode_t
+ intf_flags = VppEnum.vl_api_if_status_flags_t
+
+ # create interface 1 (BondEthernet0)
+ self.logger.info("Create bond interface")
+ # use round-robin mode to avoid negotiation required by LACP
+ bond0 = VppBondInterface(self,
+ mode=bond_modes.BOND_API_MODE_ROUND_ROBIN)
+ bond0.add_vpp_config()
+
+ # set bond admin up.
+ self.logger.info("set interface BondEthernet0 admin up")
+ bond0.admin_up()
+ # confirm link up
+ bond0.assert_interface_state(intf_flags.IF_STATUS_API_FLAG_ADMIN_UP,
+ intf_flags.IF_STATUS_API_FLAG_LINK_UP)
+
+ # toggle bond admin state
+ self.logger.info("toggle interface BondEthernet0")
+ bond0.admin_down()
+ bond0.admin_up()
+
+ # confirm link is still up
+ bond0.assert_interface_state(intf_flags.IF_STATUS_API_FLAG_ADMIN_UP,
+ intf_flags.IF_STATUS_API_FLAG_LINK_UP)
+
+ # delete BondEthernet0
+ self.logger.info("Deleting BondEthernet0")
+ bond0.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_buffers.py b/test/test_buffers.py
new file mode 100644
index 00000000000..f50f05c609a
--- /dev/null
+++ b/test/test_buffers.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+
+from framework import VppTestCase
+
+
+class TestBuffers(VppTestCase):
+ """ Buffer C Unit Tests """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestBuffers, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestBuffers, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestBuffers, self).setUp()
+
+ def tearDown(self):
+ super(TestBuffers, self).tearDown()
+
+ def test_linearize(self):
+ """ Chained Buffer Linearization """
+ error = self.vapi.cli("test chained-buffer-linearization")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
diff --git a/test/test_cdp.py b/test/test_cdp.py
new file mode 100644
index 00000000000..46751e81d86
--- /dev/null
+++ b/test/test_cdp.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+""" CDP tests """
+
+from scapy.packet import Packet
+from scapy.all import ShortField, StrField
+from scapy.layers.l2 import Dot3, LLC, SNAP
+from scapy.contrib.cdp import CDPMsgDeviceID, CDPMsgSoftwareVersion, \
+ CDPMsgPlatform, CDPMsgPortID, CDPv2_HDR
+
+from framework import VppTestCase
+from scapy.all import raw
+from re import compile
+from time import sleep
+from util import ppp
+import platform
+import sys
+import unittest
+
+
+""" TestCDP is a subclass of VPPTestCase classes.
+
+CDP test.
+
+"""
+
+
+class CustomTLV(Packet):
+ """ Custom TLV protocol layer for scapy """
+
+ fields_desc = [
+ ShortField("type", 0),
+ ShortField("length", 4),
+ StrField("value", "")
+
+ ]
+
+
+class TestCDP(VppTestCase):
+ """ CDP Test Case """
+
+ nen_ptr = compile(r"not enabled")
+ cdp_ptr = compile(r"^([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)\s+([-\.\w]+)$")
+ err_ptr = compile(r"^([\d]+)\s+([-\w]+)\s+([ -\.\w)(]+)$")
+
+ @property
+ def device_id(self):
+ return platform.node()
+
+ @property
+ def version(self):
+ return platform.release()
+
+ @property
+ def port_id(self):
+ return self.interface.name
+
+ @property
+ def platform(self):
+ return platform.system()
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCDP, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(1))
+ cls.interface = cls.pg_interfaces[0]
+
+ cls.interface.admin_up()
+ cls.interface.config_ip4()
+ cls.interface.resolve_arp()
+
+ except Exception:
+ super(TestCDP, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCDP, cls).tearDownClass()
+
+ def test_enable_cdp(self):
+ self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
+ ret = self.vapi.cli("show cdp")
+ self.logger.info(ret)
+ not_enabled = self.nen_ptr.search(ret)
+ self.assertFalse(not_enabled, "CDP isn't enabled")
+
+ def test_send_cdp_packet(self):
+ self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
+ self.send_packet(self.create_packet())
+
+ neighbors = list(self.show_cdp())
+ self.assertTrue(neighbors, "CDP didn't register neighbor")
+
+ port, system = neighbors[0]
+ length = min(len(system), len(self.device_id))
+
+ self.assert_equal(port, self.port_id, "CDP received invalid port id")
+ self.assert_equal(system[:length], self.device_id[:length],
+ "CDP received invalid device id")
+
+ def test_cdp_underflow_tlv(self):
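+        # the declared TLV length (3) is less than the 4 byte TLV header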
+ self.send_bad_packet(3, ".")
+
+ def test_cdp_overflow_tlv(self):
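+        # the declared TLV length (8) exceeds the data carried in the TLV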
+ self.send_bad_packet(8, ".")
+
+    def send_bad_packet(self, length, value):
+        self.logger.info(self.vapi.cdp_enable_disable(enable_disable=1))
+        self.send_packet(self.create_bad_packet(length, value))
+
+ err = self.statistics.get_err_counter(
+ '/err/cdp-input/cdp packets with bad TLVs')
+ self.assertTrue(err >= 1, "CDP didn't drop bad packet")
+
+ def send_packet(self, packet):
+ self.logger.debug(ppp("Sending packet:", packet))
+ self.interface.add_stream(packet)
+ self.pg_start()
+
+ def create_base_packet(self):
+ packet = (Dot3(src=self.interface.remote_mac,
+ dst="01:00:0c:cc:cc:cc") /
+ LLC(dsap=0xaa, ssap=0xaa, ctrl=0x03) /
+ SNAP()/CDPv2_HDR())
+ return packet
+
+ def create_packet(self):
+ packet = (self.create_base_packet() /
+ CDPMsgDeviceID(val=self.device_id) /
+ CDPMsgSoftwareVersion(val=self.version) /
+ CDPMsgPortID(iface=self.port_id) /
+ CDPMsgPlatform(val=self.platform))
+ return packet
+
+ def create_bad_packet(self, tl=4, tv=""):
+ packet = (self.create_base_packet() /
+ CustomTLV(type=1,
+ length=tl,
+ value=tv))
+ return packet
+
+ def process_cli(self, exp, ptr):
+ for line in self.vapi.cli(exp).split('\n')[1:]:
+ m = ptr.match(line.strip())
+ if m:
+ yield m.groups()
+
+ def show_cdp(self):
+ for pack in self.process_cli("show cdp", self.cdp_ptr):
+ try:
+ port, system, _, _ = pack
+ except ValueError:
+ pass
+ else:
+ yield port, system
diff --git a/test/test_classifier.py b/test/test_classifier.py
new file mode 100644
index 00000000000..11c0985f4d4
--- /dev/null
+++ b/test/test_classifier.py
@@ -0,0 +1,569 @@
+#!/usr/bin/env python3
+
+import binascii
+import socket
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, TCP
+from util import ppp
+from template_classifier import TestClassifier
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+
+# Tests are split into different test case classes because of the issue
+# reported in ticket VPP-1336
+class TestClassifierIP(TestClassifier):
+ """ Classifier IP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierIP, cls).tearDownClass()
+
+ def test_iacl_src_ip(self):
+ """ Source IP iACL test
+
+ Test scenario for basic IP ACL with source IP
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with source IP address.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with source IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip_src'
+ self.create_classify_table(key, self.build_ip_mask(src_ip='ffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_dst_ip(self):
+ """ Destination IP iACL test
+
+ Test scenario for basic IP ACL with destination IP
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with destination IP address.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with destination IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip_dst'
+ self.create_classify_table(key, self.build_ip_mask(dst_ip='ffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(dst_ip=self.pg1.remote_ip4))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_src_dst_ip(self):
+ """ Source and destination IP iACL test
+
+ Test scenario for basic IP ACL with source and destination IP
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with source and destination IP addresses.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with source and destination IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip'
+ self.create_classify_table(
+ key, self.build_ip_mask(src_ip='ffffffff', dst_ip='ffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4,
+ dst_ip=self.pg1.remote_ip4))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierUDP(TestClassifier):
+ """ Classifier UDP proto Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierUDP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierUDP, cls).tearDownClass()
+
+ def test_iacl_proto_udp(self):
+ """ UDP protocol iACL test
+
+ Test scenario for basic protocol ACL with UDP protocol
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP protocol
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_udp'
+ self.create_classify_table(key, self.build_ip_mask(proto='ff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_UDP))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_sport(self):
+ """ UDP source port iACL test
+
+ Test scenario for basic protocol ACL with UDP and sport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined sport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and sport
+ sport = 38
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=sport, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_udp_sport'
+ self.create_classify_table(
+ key, self.build_ip_mask(proto='ff', src_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_UDP, src_port=sport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_dport(self):
+ """ UDP destination port iACL test
+
+ Test scenario for basic protocol ACL with UDP and dport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and dport
+ dport = 427
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=1234, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_udp_dport'
+ self.create_classify_table(
+ key, self.build_ip_mask(proto='ff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_UDP, dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_sport_dport(self):
+ """ UDP source and destination ports iACL test
+
+ Test scenario for basic protocol ACL with UDP and sport and dport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined sport and dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and sport and dport
+ sport = 13720
+ dport = 9080
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=sport, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_udp_ports'
+ self.create_classify_table(
+ key,
+ self.build_ip_mask(proto='ff', src_port='ffff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_UDP, src_port=sport,
+ dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierTCP(TestClassifier):
+ """ Classifier TCP proto Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierTCP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierTCP, cls).tearDownClass()
+
+ def test_iacl_proto_tcp(self):
+ """ TCP protocol iACL test
+
+ Test scenario for basic protocol ACL with TCP protocol
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP protocol
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=1234, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_tcp'
+ self.create_classify_table(key, self.build_ip_mask(proto='ff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_TCP))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_sport(self):
+ """ TCP source port iACL test
+
+ Test scenario for basic protocol ACL with TCP and sport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined sport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and sport
+ sport = 38
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=sport, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_tcp_sport'
+ self.create_classify_table(
+ key, self.build_ip_mask(proto='ff', src_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_TCP, src_port=sport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_dport(self):
+ """ TCP destination port iACL test
+
+ Test scenario for basic protocol ACL with TCP and dport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and dport
+ dport = 427
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=1234, dport=dport))
+ self.pg0.add_stream(pkts)
+
+        key = 'proto_tcp_dport'
+ self.create_classify_table(
+ key, self.build_ip_mask(proto='ff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_TCP, dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_sport_dport(self):
+ """ TCP source and destination ports iACL test
+
+ Test scenario for basic protocol ACL with TCP and sport and dport
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined sport and dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and sport and dport
+ sport = 13720
+ dport = 9080
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=sport, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'proto_tcp_ports'
+ self.create_classify_table(
+ key,
+ self.build_ip_mask(proto='ff', src_port='ffff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(proto=socket.IPPROTO_TCP, src_port=sport,
+ dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierIPOut(TestClassifier):
+ """ Classifier output IP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIPOut, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierIPOut, cls).tearDownClass()
+
+ def test_acl_ip_out(self):
+ """ Output IP ACL test
+
+ Test scenario for basic IP ACL with source IP
+ - Create IPv4 stream for pg1 -> pg0 interface.
+ - Create ACL with source IP address.
+ - Send and verify received packets on pg0 interface.
+ """
+
+ # Basic oACL testing with source IP
+ pkts = self.create_stream(self.pg1, self.pg0, self.pg_if_packet_sizes)
+ self.pg1.add_stream(pkts)
+
+ key = 'ip_out'
+ self.create_classify_table(
+ key, self.build_ip_mask(src_ip='ffffffff'), data_offset=0)
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(src_ip=self.pg1.remote_ip4))
+ self.output_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture(len(pkts))
+ self.verify_capture(self.pg0, pkts)
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierMAC(TestClassifier):
+ """ Classifier MAC Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierMAC, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierMAC, cls).tearDownClass()
+
+ def test_acl_mac(self):
+ """ MAC ACL test
+
+ Test scenario for basic MAC ACL with source MAC
+ - Create IPv4 stream for pg0 -> pg2 interface.
+ - Create ACL with source MAC address.
+ - Send and verify received packets on pg2 interface.
+ """
+
+ # Basic iACL testing with source MAC
+ pkts = self.create_stream(self.pg0, self.pg2, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'mac'
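+        # data_offset=-14 moves the match window back to the start of the
+        # 14 byte Ethernet header so that the source MAC can be matched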
+ self.create_classify_table(
+ key, self.build_mac_mask(src_mac='ffffffffffff'), data_offset=-14)
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_mac_match(src_mac=self.pg0.remote_mac))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg2.get_capture(len(pkts))
+ self.verify_capture(self.pg2, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierPBR(TestClassifier):
+ """ Classifier PBR Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierPBR, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierPBR, cls).tearDownClass()
+
+ def test_acl_pbr(self):
+ """ IP PBR test
+
+ Test scenario for PBR with source IP
+ - Create IPv4 stream for pg0 -> pg3 interface.
+ - Configure PBR fib entry for packet forwarding.
+ - Send and verify received packets on pg3 interface.
+ """
+
+ # PBR testing with source IP
+ pkts = self.create_stream(self.pg0, self.pg3, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'pbr'
+ self.create_classify_table(key, self.build_ip_mask(src_ip='ffffffff'))
+ pbr_option = 1
+ # this will create the VRF/table in which we will insert the route
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4),
+ pbr_option, self.pbr_vrfid)
+ self.assertTrue(self.verify_vrf(self.pbr_vrfid))
+ r = VppIpRoute(self, self.pg3.local_ip4, 24,
+ [VppRoutePath(self.pg3.remote_ip4,
+ INVALID_INDEX)],
+ table_id=self.pbr_vrfid)
+ r.add_vpp_config()
+
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg3.get_capture(len(pkts))
+ self.verify_capture(self.pg3, pkts)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key), 0)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ # remove the classify session and the route
+ r.remove_vpp_config()
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4),
+ pbr_option, self.pbr_vrfid, is_add=0)
+
+ # and the table should be gone.
+ self.assertFalse(self.verify_vrf(self.pbr_vrfid))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_classifier_ip6.py b/test/test_classifier_ip6.py
new file mode 100644
index 00000000000..211374b5ec6
--- /dev/null
+++ b/test/test_classifier_ip6.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python3
+
+import unittest
+import socket
+import binascii
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet6 import IPv6, UDP, TCP
+from util import ppp
+from template_classifier import TestClassifier
+
+
+class TestClassifierIP6(TestClassifier):
+ """ Classifier IP6 Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP6, cls).setUpClass()
+ cls.af = socket.AF_INET6
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifierIP6, cls).tearDownClass()
+
+ def test_iacl_src_ip(self):
+ """ Source IP6 iACL test
+
+ Test scenario for basic IP ACL with source IP
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with source IP address.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with source IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip6_src'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(src_ip=self.pg0.remote_ip6))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_dst_ip(self):
+ """ Destination IP6 iACL test
+
+ Test scenario for basic IP ACL with destination IP
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with destination IP address.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with destination IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip6_dst'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(dst_ip='ffffffffffffffffffffffffffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(dst_ip=self.pg1.remote_ip6))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_src_dst_ip(self):
+ """ Source and destination IP6 iACL test
+
+ Test scenario for basic IP ACL with source and destination IP
+        - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with source and destination IP addresses.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with source and destination IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'ip6'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff',
+ dst_ip='ffffffffffffffffffffffffffffffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(src_ip=self.pg0.remote_ip6,
+ dst_ip=self.pg1.remote_ip6))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+
+# Tests are split into different test case classes because of the issue
+# reported in ticket VPP-1336
+class TestClassifierIP6UDP(TestClassifier):
+ """ Classifier IP6 UDP proto Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP6UDP, cls).setUpClass()
+ cls.af = socket.AF_INET6
+
+ def test_iacl_proto_udp(self):
+ """ IP6 UDP protocol iACL test
+
+ Test scenario for basic protocol ACL with UDP protocol
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP protocol
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_udp'
+ self.create_classify_table(key, self.build_ip6_mask(nh='ff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_UDP))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_sport(self):
+ """ IP6 UDP source port iACL test
+
+ Test scenario for basic protocol ACL with UDP and sport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined sport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and sport
+ sport = 38
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=sport, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_udp_sport'
+ self.create_classify_table(
+ key, self.build_ip6_mask(nh='ff', src_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_UDP, src_port=sport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_dport(self):
+ """ IP6 UDP destination port iACL test
+
+ Test scenario for basic protocol ACL with UDP and dport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and dport
+ dport = 427
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=1234, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_udp_dport'
+ self.create_classify_table(
+ key, self.build_ip6_mask(nh='ff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_UDP, dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_udp_sport_dport(self):
+ """ IP6 UDP source and destination ports iACL test
+
+ Test scenario for basic protocol ACL with UDP and sport and dport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with UDP IP protocol and defined sport and dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with UDP and sport and dport
+ sport = 13720
+ dport = 9080
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ UDP(sport=sport, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_udp_ports'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(nh='ff', src_port='ffff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_UDP, src_port=sport,
+ dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierIP6TCP(TestClassifier):
+ """ Classifier IP6 TCP proto Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP6TCP, cls).setUpClass()
+ cls.af = socket.AF_INET6
+
+ def test_iacl_proto_tcp(self):
+ """ IP6 TCP protocol iACL test
+
+ Test scenario for basic protocol ACL with TCP protocol
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP protocol
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=1234, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_tcp'
+ self.create_classify_table(key, self.build_ip6_mask(nh='ff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_TCP))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_sport(self):
+ """ IP6 TCP source port iACL test
+
+ Test scenario for basic protocol ACL with TCP and sport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined sport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and sport
+ sport = 38
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=sport, dport=5678))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_tcp_sport'
+ self.create_classify_table(
+ key, self.build_ip6_mask(nh='ff', src_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_TCP, src_port=sport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_dport(self):
+ """ IP6 TCP destination port iACL test
+
+ Test scenario for basic protocol ACL with TCP and dport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and dport
+ dport = 427
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=1234, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_tcp_dport'
+ self.create_classify_table(
+ key, self.build_ip6_mask(nh='ff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_TCP, dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+ def test_iacl_proto_tcp_sport_dport(self):
+ """ IP6 TCP source and destination ports iACL test
+
+ Test scenario for basic protocol ACL with TCP and sport and dport
+ - Create IPv6 stream for pg0 -> pg1 interface.
+ - Create iACL with TCP IP protocol and defined sport and dport.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic iACL testing with TCP and sport and dport
+ sport = 13720
+ dport = 9080
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes,
+ TCP(sport=sport, dport=dport))
+ self.pg0.add_stream(pkts)
+
+ key = 'nh_tcp_ports'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(nh='ff', src_port='ffff', dst_port='ffff'))
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(nh=socket.IPPROTO_TCP, src_port=sport,
+ dst_port=dport))
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture(len(pkts))
+ self.verify_capture(self.pg1, pkts, TCP)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierIP6Out(TestClassifier):
+ """ Classifier output IP6 Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP6Out, cls).setUpClass()
+ cls.af = socket.AF_INET6
+
+ def test_acl_ip_out(self):
+ """ Output IP6 ACL test
+
+ Test scenario for basic IP ACL with source IP
+ - Create IPv6 stream for pg1 -> pg0 interface.
+ - Create ACL with source IP address.
+ - Send and verify received packets on pg0 interface.
+ """
+
+ # Basic oACL testing with source IP
+ pkts = self.create_stream(self.pg1, self.pg0, self.pg_if_packet_sizes)
+ self.pg1.add_stream(pkts)
+
+ key = 'ip6_out'
+ self.create_classify_table(
+ key,
+ self.build_ip6_mask(src_ip='ffffffffffffffffffffffffffffffff'),
+ data_offset=0)
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_ip6_match(src_ip=self.pg1.remote_ip6))
+ self.output_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture(len(pkts))
+ self.verify_capture(self.pg0, pkts)
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+
+class TestClassifierIP6MAC(TestClassifier):
+ """ Classifier IP6 MAC Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestClassifierIP6MAC, cls).setUpClass()
+ cls.af = socket.AF_INET6
+
+ def test_acl_mac(self):
+ """ IP6 MAC iACL test
+
+ Test scenario for basic MAC ACL with source MAC
+ - Create IPv6 stream for pg0 -> pg2 interface.
+ - Create ACL with source MAC address.
+ - Send and verify received packets on pg2 interface.
+ """
+
+ # Basic iACL testing with source MAC
+ pkts = self.create_stream(self.pg0, self.pg2, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ key = 'mac'
+ self.create_classify_table(
+ key, self.build_mac_mask(src_mac='ffffffffffff'), data_offset=-14)
+ self.create_classify_session(
+ self.acl_tbl_idx.get(key),
+ self.build_mac_match(src_mac=self.pg0.remote_mac))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg2.get_capture(len(pkts))
+ self.verify_capture(self.pg2, pkts)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_classify_l2_acl.py b/test/test_classify_l2_acl.py
new file mode 100644
index 00000000000..b1309881e58
--- /dev/null
+++ b/test/test_classify_l2_acl.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python3
+""" Classifier-based L2 ACL Test Case HLD:
+"""
+
+import unittest
+import random
+import binascii
+import socket
+
+
+from scapy.packet import Raw
+from scapy.data import ETH_P_IP
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest
+from scapy.layers.inet6 import IPv6ExtHdrFragment
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+from template_classifier import TestClassifier
+
+
+class TestClassifyAcl(TestClassifier):
+ """ Classifier-based L2 input and output ACL Test Case """
+
+ # traffic types
+ IP = 0
+ ICMP = 1
+
+ # IP version
+ IPRANDOM = -1
+ IPV4 = 0
+ IPV6 = 1
+
+ # rule types
+ DENY = 0
+ PERMIT = 1
+
+ # supported protocols
+ proto = [[6, 17], [1, 58]]
+ proto_map = {1: 'ICMP', 58: 'ICMPv6EchoRequest', 6: 'TCP', 17: 'UDP'}
+ ICMPv4 = 0
+ ICMPv6 = 1
+ TCP = 0
+ UDP = 1
+ PROTO_ALL = 0
+
+ # port ranges
+ PORTS_ALL = -1
+ PORTS_RANGE = 0
+ PORTS_RANGE_2 = 1
+ udp_sport_from = 10
+ udp_sport_to = udp_sport_from + 5
+ udp_dport_from = 20000
+ udp_dport_to = udp_dport_from + 5000
+ tcp_sport_from = 30
+ tcp_sport_to = tcp_sport_from + 5
+ tcp_dport_from = 40000
+ tcp_dport_to = tcp_dport_from + 5000
+
+ udp_sport_from_2 = 90
+ udp_sport_to_2 = udp_sport_from_2 + 5
+ udp_dport_from_2 = 30000
+ udp_dport_to_2 = udp_dport_from_2 + 5000
+ tcp_sport_from_2 = 130
+ tcp_sport_to_2 = tcp_sport_from_2 + 5
+ tcp_dport_from_2 = 20000
+ tcp_dport_to_2 = tcp_dport_from_2 + 5000
+
+ icmp4_type = 8 # echo request
+ icmp4_code = 3
+ icmp6_type = 128 # echo request
+ icmp6_code = 3
+
+ icmp4_type_2 = 8
+ icmp4_code_from_2 = 5
+ icmp4_code_to_2 = 20
+ icmp6_type_2 = 128
+ icmp6_code_from_2 = 8
+ icmp6_code_to_2 = 42
+
+ # Test variables
+ bd_id = 1
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(TestClassifyAcl, cls).setUpClass()
+ cls.af = None
+
+ try:
+ # Create 2 pg interfaces
+ cls.create_pg_interfaces(range(2))
+
+ # Packet flows mapping pg0 -> pg1, pg2 etc.
+ cls.flows = dict()
+ cls.flows[cls.pg0] = [cls.pg1]
+
+ # Packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+
+            # Create BD with MAC learning and unknown unicast flooding
+            # enabled and put interfaces into this BD
+ cls.vapi.bridge_domain_add_del(bd_id=cls.bd_id, uu_flood=1,
+ learn=1)
+ for pg_if in cls.pg_interfaces:
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=pg_if.sw_if_index, bd_id=cls.bd_id)
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ # Mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # Create list of deleted hosts
+ cls.deleted_hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.deleted_hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # warm-up the mac address tables
+ # self.warmup_test()
+
+ # Holder of the active classify table key
+ cls.acl_active_table = ''
+
+ except Exception:
+ super(TestClassifyAcl, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestClassifyAcl, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestClassifyAcl, self).setUp()
+ self.acl_tbl_idx = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ if not self.vpp_dead:
+ if self.acl_active_table == 'mac_inout':
+ self.output_acl_set_interface(
+ self.pg1, self.acl_tbl_idx.get(self.acl_active_table), 0)
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(self.acl_active_table), 0)
+ self.acl_active_table = ''
+ elif self.acl_active_table == 'mac_out':
+ self.output_acl_set_interface(
+ self.pg1, self.acl_tbl_idx.get(self.acl_active_table), 0)
+ self.acl_active_table = ''
+ elif self.acl_active_table == 'mac_in':
+ self.input_acl_set_interface(
+ self.pg0, self.acl_tbl_idx.get(self.acl_active_table), 0)
+ self.acl_active_table = ''
+
+ super(TestClassifyAcl, self).tearDown()
+
+ def create_classify_session(self, intf, table_index, match,
+ hit_next_index=0xffffffff, is_add=1):
+ """Create Classify Session
+
+ :param VppInterface intf: Interface to apply classify session.
+ :param int table_index: table index to identify classify table.
+ :param str match: matched value for interested traffic.
+        :param int hit_next_index: next node index on table hit
+            (default 0xffffffff; these tests pass 0 for deny).
+        :param int is_add: option to configure classify session.
+          - create(1) or delete(0)
+ """
+ mask_match, mask_match_len = self._resolve_mask_match(match)
+ r = self.vapi.classify_add_del_session(
+ is_add=is_add,
+ table_index=table_index,
+ match=mask_match,
+ match_len=mask_match_len,
+ hit_next_index=hit_next_index)
+ self.assertIsNotNone(r, 'No response msg for add_del_session')
+
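+    # Typical usage (sketch, mirroring build_classify_table() below): build a
+    # classify table from a MAC mask, then add one session per MAC to match,
+    # e.g.
+    #   mask = self.build_mac_mask(src_mac='ffffffffffff')
+    #   self.create_classify_table('mac_in', mask)
+    #   self.create_classify_session(self.pg0, self.acl_tbl_idx.get('mac_in'),
+    #                                self.build_mac_match(src_mac=host.mac))
+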
+ def create_hosts(self, count, start=0):
+ """
+        Create the required number of host MAC addresses and distribute them
+        among interfaces. Create host IPv4 and IPv6 addresses for every MAC.
+
+ :param int count: Number of hosts to create MAC/IPv4 addresses for.
+ :param int start: Number to start numbering from.
+ """
+ n_int = len(self.pg_interfaces)
+ macs_per_if = count // n_int
+ i = -1
+ for pg_if in self.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i + start
+ end_nr = count + start if i == (n_int - 1) \
+ else macs_per_if * (i + 1) + start
+ hosts = self.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(start_nr, end_nr):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j),
+ "2017:dead:%02x::%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
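+    # Addressing scheme used above (illustration only): an interface with
+    # sw_if_index 1 and host index j=2 yields MAC 00:00:00:ff:01:02,
+    # IPv4 172.17.101.2 and IPv6 2017:dead:01::2.
+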
+ def create_upper_layer(self, packet_index, proto, ports=0):
+ p = self.proto_map[proto]
+ if p == 'UDP':
+ if ports == 0:
+ return UDP(sport=random.randint(self.udp_sport_from,
+ self.udp_sport_to),
+ dport=random.randint(self.udp_dport_from,
+ self.udp_dport_to))
+ else:
+ return UDP(sport=ports, dport=ports)
+ elif p == 'TCP':
+ if ports == 0:
+ return TCP(sport=random.randint(self.tcp_sport_from,
+ self.tcp_sport_to),
+ dport=random.randint(self.tcp_dport_from,
+ self.tcp_dport_to))
+ else:
+ return TCP(sport=ports, dport=ports)
+ return ''
+
+ def create_stream(self, src_if, packet_sizes, traffic_type=0, ipv6=0,
+ proto=-1, ports=0, fragments=False,
+ pkt_raw=True, etype=-1):
+ """
+ Create input packet stream for defined interface using hosts or
+ deleted_hosts list.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+        :param traffic_type: 1: ICMP packet, 0: IP (TCP/UDP) packet.
+ :return: Stream of packets.
+ """
+ pkts = []
+        if src_if in self.flows:
+ src_hosts = self.hosts_by_pg_idx[src_if.sw_if_index]
+ for dst_if in self.flows[src_if]:
+ dst_hosts = self.hosts_by_pg_idx[dst_if.sw_if_index]
+ n_int = len(dst_hosts) * len(src_hosts)
+ for i in range(0, n_int):
+ dst_host = dst_hosts[i // len(src_hosts)]
+ src_host = src_hosts[i % len(src_hosts)]
+ pkt_info = self.create_packet_info(src_if, dst_if)
+ if ipv6 == 1:
+ pkt_info.ip = 1
+ elif ipv6 == 0:
+ pkt_info.ip = 0
+ else:
+ pkt_info.ip = random.choice([0, 1])
+ if proto == -1:
+ pkt_info.proto = random.choice(self.proto[self.IP])
+ else:
+ pkt_info.proto = proto
+ payload = self.info_to_payload(pkt_info)
+ p = Ether(dst=dst_host.mac, src=src_host.mac)
+ if etype > 0:
+ p = Ether(dst=dst_host.mac,
+ src=src_host.mac,
+ type=etype)
+ if pkt_info.ip:
+ p /= IPv6(dst=dst_host.ip6, src=src_host.ip6)
+ if fragments:
+ p /= IPv6ExtHdrFragment(offset=64, m=1)
+ else:
+ if fragments:
+ p /= IP(src=src_host.ip4, dst=dst_host.ip4,
+ flags=1, frag=64)
+ else:
+ p /= IP(src=src_host.ip4, dst=dst_host.ip4)
+ if traffic_type == self.ICMP:
+ if pkt_info.ip:
+ p /= ICMPv6EchoRequest(type=self.icmp6_type,
+ code=self.icmp6_code)
+ else:
+ p /= ICMP(type=self.icmp4_type,
+ code=self.icmp4_code)
+ else:
+ p /= self.create_upper_layer(i, pkt_info.proto, ports)
+ if pkt_raw:
+ p /= Raw(payload)
+ pkt_info.data = p.copy()
+ if pkt_raw:
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, pg_if, capture,
+ traffic_type=0, ip_type=0, etype=-1):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+        :param traffic_type: 1: ICMP packet, 0: IP (TCP/UDP) packet.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ if etype > 0:
+ if packet[Ether].type != etype:
+ self.logger.error(ppp("Unexpected ethertype in packet:",
+ packet))
+ else:
+ continue
+ try:
+ # Raw data for ICMPv6 are stored in ICMPv6EchoRequest.data
+ if traffic_type == self.ICMP and ip_type == self.IPV6:
+ payload_info = self.payload_to_info(
+ packet[ICMPv6EchoRequest].data)
+ payload = packet[ICMPv6EchoRequest]
+ else:
+ payload_info = self.payload_to_info(packet[Raw])
+ payload = packet[self.proto_map[payload_info.proto]]
+            except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ if ip_type != 0:
+ self.assertEqual(payload_info.ip, ip_type)
+ if traffic_type == self.ICMP:
+ try:
+ if payload_info.ip == 0:
+ self.assertEqual(payload.type, self.icmp4_type)
+ self.assertEqual(payload.code, self.icmp4_code)
+ else:
+ self.assertEqual(payload.type, self.icmp6_type)
+ self.assertEqual(payload.code, self.icmp6_code)
+                except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+ else:
+ try:
+ ip_version = IPv6 if payload_info.ip == 1 else IP
+
+ ip = packet[ip_version]
+ packet_index = payload_info.index
+
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src,
+ packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[ip_version].src)
+ self.assertEqual(ip.dst, saved_packet[ip_version].dst)
+ p = self.proto_map[payload_info.proto]
+ if p == 'TCP':
+ tcp = packet[TCP]
+ self.assertEqual(tcp.sport, saved_packet[
+ TCP].sport)
+ self.assertEqual(tcp.dport, saved_packet[
+ TCP].dport)
+ elif p == 'UDP':
+ udp = packet[UDP]
+ self.assertEqual(udp.sport, saved_packet[
+ UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[
+ UDP].dport)
+                except Exception:
+                    self.logger.error(ppp("Unexpected or invalid packet:",
+                                          packet))
+                    raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def run_traffic_no_check(self):
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ for i in self.pg_interfaces:
+            if i in self.flows:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ def run_verify_test(self, traffic_type=0, ip_type=0, proto=-1, ports=0,
+ frags=False, pkt_raw=True, etype=-1):
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ pkts_cnt = 0
+ for i in self.pg_interfaces:
+            if i in self.flows:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes,
+ traffic_type, ip_type, proto, ports,
+ frags, pkt_raw, etype)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+ pkts_cnt += len(pkts)
+
+        # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for src_if in self.pg_interfaces:
+            if src_if in self.flows:
+ for dst_if in self.flows[src_if]:
+ capture = dst_if.get_capture(pkts_cnt)
+ self.logger.info("Verifying capture on interface %s" %
+ dst_if.name)
+ self.verify_capture(dst_if, capture,
+ traffic_type, ip_type, etype)
+
+ def run_verify_negat_test(self, traffic_type=0, ip_type=0, proto=-1,
+ ports=0, frags=False, etype=-1):
+ # Test
+ self.reset_packet_infos()
+ for i in self.pg_interfaces:
+            if i in self.flows:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes,
+ traffic_type, ip_type, proto, ports,
+ frags, True, etype)
+ if len(pkts) > 0:
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for src_if in self.pg_interfaces:
+            if src_if in self.flows:
+ for dst_if in self.flows[src_if]:
+ self.logger.info("Verifying capture on interface %s" %
+ dst_if.name)
+ capture = dst_if.get_capture(0)
+ self.assertEqual(len(capture), 0)
+
+ def build_classify_table(self, src_mac='', dst_mac='', ether_type='',
+ etype='', key='mac', hit_next_index=0xffffffff):
+ # Basic ACL testing
+ a_mask = self.build_mac_mask(src_mac=src_mac, dst_mac=dst_mac,
+ ether_type=ether_type)
+ self.create_classify_table(key, a_mask)
+ for host in self.hosts_by_pg_idx[self.pg0.sw_if_index]:
+ s_mac = host.mac if src_mac else ''
+ if dst_mac:
+ for dst_if in self.flows[self.pg0]:
+ for dst_host in self.hosts_by_pg_idx[dst_if.sw_if_index]:
+ self.create_classify_session(
+ self.pg0, self.acl_tbl_idx.get(key),
+ self.build_mac_match(src_mac=s_mac,
+ dst_mac=dst_host.mac,
+ ether_type=etype),
+ hit_next_index=hit_next_index)
+ else:
+ self.create_classify_session(
+ self.pg0, self.acl_tbl_idx.get(key),
+ self.build_mac_match(src_mac=s_mac, dst_mac='',
+ ether_type=etype),
+ hit_next_index=hit_next_index)
+
+ def test_0000_warmup_test(self):
+ """ Learn the MAC addresses
+ """
+ self.create_hosts(2)
+ self.run_traffic_no_check()
+
+ def test_0010_inacl_permit_src_mac(self):
+ """ Input L2 ACL test - permit source MAC
+
+        Test scenario for input L2 ACL with source MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with source MAC address.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_in'
+ self.build_classify_table(src_mac='ffffffffffff', key=key)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ def test_0011_inacl_permit_dst_mac(self):
+ """ Input L2 ACL test - permit destination MAC
+
+        Test scenario for input L2 ACL with destination MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with destination MAC address.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_in'
+ self.build_classify_table(dst_mac='ffffffffffff', key=key)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ def test_0012_inacl_permit_src_dst_mac(self):
+ """ Input L2 ACL test - permit source and destination MAC
+
+        Test scenario for input L2 ACL with source and destination MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with source and destination MAC addresses.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_in'
+ self.build_classify_table(
+ src_mac='ffffffffffff', dst_mac='ffffffffffff', key=key)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ def test_0013_inacl_permit_ether_type(self):
+ """ Input L2 ACL test - permit ether_type
+
+        Test scenario for input L2 ACL with ether_type
+        - Create IPv4 stream for pg0 -> pg1 interface.
+        - Create ACL matching on ether_type.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_in'
+ self.build_classify_table(
+ ether_type='ffff', etype=hex(ETH_P_IP)[2:], key=key)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ def test_0015_inacl_deny(self):
+ """ Input L2 ACL test - deny
+
+        Test scenario for input L2 ACL deny with source MAC
+        - Create IPv4 stream for pg0 -> pg1 interface.
+        - Create ACL with source MAC address.
+ - Send and verify no received packets on pg1 interface.
+ """
+ key = 'mac_in'
+ self.build_classify_table(
+ src_mac='ffffffffffff', hit_next_index=0, key=key)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_negat_test(self.IP, self.IPV4, -1)
+
+ def test_0020_outacl_permit(self):
+ """ Output L2 ACL test - permit
+
+        Test scenario for output L2 ACL with source MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with source MAC address.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_out'
+ self.build_classify_table(src_mac='ffffffffffff', key=key)
+ self.output_acl_set_interface(self.pg1, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+ def test_0025_outacl_deny(self):
+ """ Output L2 ACL test - deny
+
+        Test scenario for output L2 ACL deny with source MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with source MAC address.
+ - Send and verify no received packets on pg1 interface.
+ """
+ key = 'mac_out'
+ self.build_classify_table(
+ src_mac='ffffffffffff', hit_next_index=0, key=key)
+ self.output_acl_set_interface(self.pg1, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_negat_test(self.IP, self.IPV4, -1)
+
+ def test_0030_inoutacl_permit(self):
+ """ Input+Output L2 ACL test - permit
+
+        Test scenario for input and output L2 ACL with source MAC
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACLs with source MAC address.
+ - Send and verify received packets on pg1 interface.
+ """
+ key = 'mac_inout'
+ self.build_classify_table(src_mac='ffffffffffff', key=key)
+ self.output_acl_set_interface(self.pg1, self.acl_tbl_idx.get(key))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get(key))
+ self.acl_active_table = key
+ self.run_verify_test(self.IP, self.IPV4, -1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_cli.py b/test/test_cli.py
new file mode 100644
index 00000000000..5005bf4c43a
--- /dev/null
+++ b/test/test_cli.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+"""CLI functional tests"""
+
+import datetime
+import time
+import unittest
+
+from vpp_papi import VPPIOError
+
+from framework import VppTestCase, VppTestRunner
+
+
+class TestCLI(VppTestCase):
+ """ CLI Test Case """
+ maxDiff = None
+
+ @classmethod
+ def setUpClass(cls):
+ # using the framework default
+ cls.vapi_response_timeout = 5
+ super(TestCLI, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCLI, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestCLI, self).setUp()
+
+ def tearDown(self):
+ super(TestCLI, self).tearDown()
+
+ def test_cli_retval(self):
+ """ CLI inband retval """
+ rv = self.vapi.papi.cli_inband(cmd='this command does not exist')
+ self.assertNotEqual(rv.retval, 0)
+
+ rv = self.vapi.papi.cli_inband(cmd='show version')
+ self.assertEqual(rv.retval, 0)
+
+ def test_long_cli_delay(self):
+ """ Test that VppApiClient raises VppIOError if timeout.""" # noqa
+ with self.assertRaises(VPPIOError) as ctx:
+ rv = self.vapi.papi.cli_inband(cmd='wait 10')
+
+ def test_long_cli_delay_override(self):
+ """ Test per-command _timeout option.""" # noqa
+ rv = self.vapi.papi.cli_inband(cmd='wait 10', _timeout=15)
+ self.assertEqual(rv.retval, 0)
+
+
+class TestCLIExtendedVapiTimeout(VppTestCase):
+ maxDiff = None
+
+ @classmethod
+ def setUpClass(cls):
+ cls.vapi_response_timeout = 15
+ cls.__doc__ = " CLI Test Case w/ Extended (%ssec) Vapi Timeout " \
+ % cls.vapi_response_timeout
+ super(TestCLIExtendedVapiTimeout, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCLIExtendedVapiTimeout, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestCLIExtendedVapiTimeout, self).setUp()
+
+ def tearDown(self):
+ super(TestCLIExtendedVapiTimeout, self).tearDown()
+
+ def test_long_cli_delay(self):
+ """ Test that delayed result returns with extended timeout."""
+ wait_secs = self.vapi_response_timeout - 1
+
+ # get vpp time as float
+ start = self.vapi.papi.show_vpe_system_time(
+ _no_type_conversion=True).vpe_system_time
+ rv = self.vapi.papi.cli_inband(cmd='wait %s' % wait_secs)
+ now = self.vapi.papi.show_vpe_system_time(
+ _no_type_conversion=True).vpe_system_time
+
+        # assume that the overhead of the measurement is not more than .5 sec.
+ self.assertEqual(round(now - start), wait_secs)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_cnat.py b/test/test_cnat.py
new file mode 100644
index 00000000000..ff4c44033cb
--- /dev/null
+++ b/test/test_cnat.py
@@ -0,0 +1,975 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto, INVALID_INDEX
+from itertools import product
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, TCP, ICMP
+from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
+from scapy.layers.inet6 import IPv6, IPerror6, ICMPv6DestUnreach
+from scapy.layers.inet6 import ICMPv6EchoRequest, ICMPv6EchoReply
+
+import struct
+
+from ipaddress import ip_address, ip_network, \
+ IPv4Address, IPv6Address, IPv4Network, IPv6Network
+
+from vpp_object import VppObject
+from vpp_papi import VppEnum
+
+N_PKTS = 15
+
+
+class Ep(object):
+ """ CNat endpoint """
+
+ def __init__(self, ip=None, port=0, l4p=TCP,
+ sw_if_index=INVALID_INDEX, is_v6=False):
+ self.ip = ip
+ if ip is None:
+ self.ip = "::" if is_v6 else "0.0.0.0"
+ self.port = port
+ self.l4p = l4p
+ self.sw_if_index = sw_if_index
+ if is_v6:
+ self.if_af = VppEnum.vl_api_address_family_t.ADDRESS_IP6
+ else:
+ self.if_af = VppEnum.vl_api_address_family_t.ADDRESS_IP4
+
+ def encode(self):
+ return {'addr': self.ip,
+ 'port': self.port,
+ 'sw_if_index': self.sw_if_index,
+ 'if_af': self.if_af}
+
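+    # Example (sketch): Ep("10.0.0.1", 8080).encode() produces a dict such as
+    #   {'addr': '10.0.0.1', 'port': 8080,
+    #    'sw_if_index': INVALID_INDEX, 'if_af': ADDRESS_IP4}
+    # i.e. the endpoint form consumed by cnat_translation_update below.
+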
+ @classmethod
+ def from_pg(cls, pg, is_v6=False):
+ if pg is None:
+ return cls(is_v6=is_v6)
+ else:
+ return cls(sw_if_index=pg.sw_if_index, is_v6=is_v6)
+
+ @property
+ def isV6(self):
+ return ":" in self.ip
+
+ def __str__(self):
+ return ("%s:%d" % (self.ip, self.port))
+
+
+class EpTuple(object):
+ """ CNat endpoint """
+
+ def __init__(self, src, dst):
+ self.src = src
+ self.dst = dst
+
+ def encode(self):
+ return {'src_ep': self.src.encode(),
+ 'dst_ep': self.dst.encode()}
+
+ def __str__(self):
+ return ("%s->%s" % (self.src, self.dst))
+
+
+class VppCNatTranslation(VppObject):
+
+ def __init__(self, test, iproto, vip, paths):
+ self._test = test
+ self.vip = vip
+ self.iproto = iproto
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def __str__(self):
+ return ("%s %s %s" % (self.vip, self.iproto, self.paths))
+
+ @property
+ def vl4_proto(self):
+ ip_proto = VppEnum.vl_api_ip_proto_t
+ return {
+ UDP: ip_proto.IP_API_PROTO_UDP,
+ TCP: ip_proto.IP_API_PROTO_TCP,
+ }[self.iproto]
+
+ def add_vpp_config(self):
+ r = self._test.vapi.cnat_translation_update(
+ {'vip': self.vip.encode(),
+ 'ip_proto': self.vl4_proto,
+ 'n_paths': len(self.paths),
+ 'paths': self.encoded_paths})
+ self._test.registry.register(self, self._test.logger)
+ self.id = r.id
+
+ def modify_vpp_config(self, paths):
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ r = self._test.vapi.cnat_translation_update(
+ {'vip': self.vip.encode(),
+ 'ip_proto': self.vl4_proto,
+ 'n_paths': len(self.paths),
+ 'paths': self.encoded_paths})
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.cnat_translation_del(id=self.id)
+
+ def query_vpp_config(self):
+ for t in self._test.vapi.cnat_translation_dump():
+ if self.id == t.translation.id:
+ return t.translation
+ return None
+
+ def object_id(self):
+ return ("cnat-translation-%s" % (self.vip))
+
+ def get_stats(self):
+ c = self._test.statistics.get_counter("/net/cnat-translation")
+ return c[0][self.id]
+
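+# Usage (sketch, values illustrative): a TCP translation for vip 30.0.0.1:5555
+# with a single backend 10.0.0.1:4000 would be configured as
+#   t = VppCNatTranslation(self, TCP, Ep("30.0.0.1", 5555),
+#                          [EpTuple(Ep("0.0.0.0", 0), Ep("10.0.0.1", 4000))])
+#   t.add_vpp_config()
+# which is what cnat_create_translation() below does for each vip under test.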
+
+class TestCNatTranslation(VppTestCase):
+ """ CNat Translation """
+ extra_vpp_punt_config = ["cnat", "{",
+ "session-db-buckets", "64",
+ "session-cleanup-timeout", "0.1",
+ "session-max-age", "1",
+ "tcp-max-age", "1",
+ "scanner", "off", "}"]
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCNatTranslation, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCNatTranslation, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestCNatTranslation, self).setUp()
+
+ self.create_pg_interfaces(range(3))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestCNatTranslation, self).tearDown()
+
+ def cnat_create_translation(self, vip, nbr):
+ ip_v = "ip6" if vip.isV6 else "ip4"
+ dep = Ep(getattr(self.pg1.remote_hosts[nbr], ip_v), 4000 + nbr)
+ sep = Ep("::", 0) if vip.isV6 else Ep("0.0.0.0", 0)
+ t1 = VppCNatTranslation(
+ self, vip.l4p, vip,
+ [EpTuple(sep, dep), EpTuple(sep, dep)])
+ t1.add_vpp_config()
+ return t1
+
+ def cnat_test_translation(self, t1, nbr, sports, isV6=False):
+ ip_v = "ip6" if isV6 else "ip4"
+ ip_class = IPv6 if isV6 else IP
+ vip = t1.vip
+
+ #
+ # Flows
+ #
+ for src in self.pg0.remote_hosts:
+ for sport in sports:
+ # from client to vip
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=src.mac) /
+ ip_class(src=getattr(src, ip_v), dst=vip.ip) /
+ vip.l4p(sport=sport, dport=vip.port) /
+ Raw())
+
+ self.vapi.cli("trace add pg-input 1")
+ rxs = self.send_and_expect(self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+ self.logger.info(self.vapi.cli("show trace max 1"))
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(
+ rx[ip_class].dst,
+ getattr(self.pg1.remote_hosts[nbr], ip_v))
+ self.assertEqual(rx[vip.l4p].dport, 4000 + nbr)
+ self.assertEqual(
+ rx[ip_class].src,
+ getattr(src, ip_v))
+ self.assertEqual(rx[vip.l4p].sport, sport)
+
+ # from vip to client
+ p1 = (Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_mac) /
+ ip_class(src=getattr(
+ self.pg1.remote_hosts[nbr],
+ ip_v),
+ dst=getattr(src, ip_v)) /
+ vip.l4p(sport=4000 + nbr, dport=sport) /
+ Raw())
+
+ rxs = self.send_and_expect(self.pg1,
+ p1 * N_PKTS,
+ self.pg0)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(
+ rx[ip_class].dst,
+ getattr(src, ip_v))
+ self.assertEqual(rx[vip.l4p].dport, sport)
+ self.assertEqual(rx[ip_class].src, vip.ip)
+ self.assertEqual(rx[vip.l4p].sport, vip.port)
+
+ #
+ # packets to the VIP that do not match a
+ # translation are dropped
+ #
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=src.mac) /
+ ip_class(src=getattr(src, ip_v), dst=vip.ip) /
+ vip.l4p(sport=sport, dport=6666) /
+ Raw())
+
+ self.send_and_assert_no_replies(self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ #
+ # packets from the VIP that do not match a
+ # session are forwarded
+ #
+ p1 = (Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_mac) /
+ ip_class(src=getattr(
+ self.pg1.remote_hosts[nbr],
+ ip_v),
+ dst=getattr(src, ip_v)) /
+ vip.l4p(sport=6666, dport=sport) /
+ Raw())
+
+ rxs = self.send_and_expect(self.pg1,
+ p1 * N_PKTS,
+ self.pg0)
+
+ def cnat_test_translation_update(self, t1, sports, isV6=False):
+ ip_v = "ip6" if isV6 else "ip4"
+ ip_class = IPv6 if isV6 else IP
+ vip = t1.vip
+
+ #
+ # modify the translation to use a different backend
+ #
+ dep = Ep(getattr(self.pg2, 'remote_' + ip_v), 5000)
+ sep = Ep("::", 0) if isV6 else Ep("0.0.0.0", 0)
+ t1.modify_vpp_config([EpTuple(sep, dep)])
+
+ #
+ # existing flows follow the old path
+ #
+ for src in self.pg0.remote_hosts:
+ for sport in sports:
+ # from client to vip
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=src.mac) /
+ ip_class(src=getattr(src, ip_v), dst=vip.ip) /
+ vip.l4p(sport=sport, dport=vip.port) /
+ Raw())
+
+ rxs = self.send_and_expect(self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ #
+ # new flows go to the new backend
+ #
+ for src in self.pg0.remote_hosts:
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=src.mac) /
+ ip_class(src=getattr(src, ip_v), dst=vip.ip) /
+ vip.l4p(sport=9999, dport=vip.port) /
+ Raw())
+
+ rxs = self.send_and_expect(self.pg0,
+ p1 * N_PKTS,
+ self.pg2)
+
+ def cnat_translation(self, vips, isV6=False):
+ """ CNat Translation """
+
+ ip_class = IPv6 if isV6 else IP
+ ip_v = "ip6" if isV6 else "ip4"
+ sports = [1234, 1233]
+
+ #
+        # turn the scanner off whilst testing, otherwise sessions
+        # will time out
+ #
+ self.vapi.cli("test cnat scanner off")
+
+ sessions = self.vapi.cnat_session_dump()
+
+ trs = []
+ for nbr, vip in enumerate(vips):
+ trs.append(self.cnat_create_translation(vip, nbr))
+
+ self.logger.info(self.vapi.cli("sh cnat client"))
+ self.logger.info(self.vapi.cli("sh cnat translation"))
+
+ #
+ # translations
+ #
+ for nbr, vip in enumerate(vips):
+ self.cnat_test_translation(trs[nbr], nbr, sports, isV6=isV6)
+ self.cnat_test_translation_update(trs[nbr], sports, isV6=isV6)
+ if isV6:
+ self.logger.info(self.vapi.cli(
+ "sh ip6 fib %s" % self.pg0.remote_ip6))
+ else:
+ self.logger.info(self.vapi.cli(
+ "sh ip fib %s" % self.pg0.remote_ip4))
+ self.logger.info(self.vapi.cli("sh cnat session verbose"))
+
+ #
+ # turn the scanner back on and wait until the sessions
+        # all disappear
+ #
+ self.vapi.cli("test cnat scanner on")
+
+ n_tries = 0
+ sessions = self.vapi.cnat_session_dump()
+ while (len(sessions) and n_tries < 100):
+ n_tries += 1
+ sessions = self.vapi.cnat_session_dump()
+ self.sleep(2)
+ self.logger.info(self.vapi.cli("show cnat session verbose"))
+
+ self.assertTrue(n_tries < 100)
+ self.vapi.cli("test cnat scanner off")
+
+ #
+ # load some flows again and purge
+ #
+ for vip in vips:
+ for src in self.pg0.remote_hosts:
+ for sport in sports:
+ # from client to vip
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=src.mac) /
+ ip_class(src=getattr(src, ip_v), dst=vip.ip) /
+ vip.l4p(sport=sport, dport=vip.port) /
+ Raw())
+ self.send_and_expect(self.pg0,
+ p1 * N_PKTS,
+ self.pg2)
+
+ for tr in trs:
+ tr.remove_vpp_config()
+
+ self.assertTrue(self.vapi.cnat_session_dump())
+ self.vapi.cnat_session_purge()
+ self.assertFalse(self.vapi.cnat_session_dump())
+
+ def test_icmp(self):
+ vips = [
+ Ep("30.0.0.1", 5555),
+ Ep("30.0.0.2", 5554),
+ Ep("30.0.0.2", 5553, UDP),
+ Ep("30::1", 6666),
+ Ep("30::2", 5553, UDP),
+ ]
+ sport = 1234
+
+ self.pg0.generate_remote_hosts(len(vips))
+ self.pg0.configure_ipv6_neighbors()
+ self.pg0.configure_ipv4_neighbors()
+
+ self.pg1.generate_remote_hosts(len(vips))
+ self.pg1.configure_ipv6_neighbors()
+ self.pg1.configure_ipv4_neighbors()
+
+ self.vapi.cli("test cnat scanner off")
+ trs = []
+ for nbr, vip in enumerate(vips):
+ trs.append(self.cnat_create_translation(vip, nbr))
+
+ self.logger.info(self.vapi.cli("sh cnat client"))
+ self.logger.info(self.vapi.cli("sh cnat translation"))
+
+ for nbr, vip in enumerate(vips):
+ if vip.isV6:
+ client_addr = self.pg0.remote_hosts[0].ip6
+ remote_addr = self.pg1.remote_hosts[nbr].ip6
+ remote2_addr = self.pg2.remote_hosts[0].ip6
+ else:
+ client_addr = self.pg0.remote_hosts[0].ip4
+ remote_addr = self.pg1.remote_hosts[nbr].ip4
+ remote2_addr = self.pg2.remote_hosts[0].ip4
+ IP46 = IPv6 if vip.isV6 else IP
+ # from client to vip
+ p1 = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_hosts[0].mac) /
+ IP46(src=client_addr, dst=vip.ip) /
+ vip.l4p(sport=sport, dport=vip.port) /
+ Raw())
+
+ rxs = self.send_and_expect(self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, remote_addr)
+ self.assertEqual(rx[vip.l4p].dport, 4000 + nbr)
+ self.assertEqual(rx[IP46].src, client_addr)
+ self.assertEqual(rx[vip.l4p].sport, sport)
+
+ InnerIP = rxs[0][IP46]
+
+ ICMP46 = ICMPv6DestUnreach if vip.isV6 else ICMP
+ ICMPelem = ICMPv6DestUnreach(code=1) if vip.isV6 else ICMP(type=11)
+ # from vip to client, ICMP error
+ p1 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP46(src=remote_addr, dst=client_addr) /
+ ICMPelem / InnerIP)
+
+ rxs = self.send_and_expect(self.pg1,
+ p1 * N_PKTS,
+ self.pg0)
+
+ TCPUDPError = TCPerror if vip.l4p == TCP else UDPerror
+ IP46error = IPerror6 if vip.isV6 else IPerror
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].src, vip.ip)
+ self.assertEqual(rx[ICMP46][IP46error].src, client_addr)
+ self.assertEqual(rx[ICMP46][IP46error].dst, vip.ip)
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPUDPError].sport, sport)
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPUDPError].dport, vip.port)
+
+ # from other remote to client, ICMP error
+ # outside shouldn't be NAT-ed
+ p1 = (Ether(dst=self.pg2.local_mac, src=self.pg2.remote_mac) /
+ IP46(src=remote2_addr, dst=client_addr) /
+ ICMPelem / InnerIP)
+
+ rxs = self.send_and_expect(self.pg1,
+ p1 * N_PKTS,
+ self.pg0)
+
+ TCPUDPError = TCPerror if vip.l4p == TCP else UDPerror
+ IP46error = IPerror6 if vip.isV6 else IPerror
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].src, remote2_addr)
+ self.assertEqual(rx[ICMP46][IP46error].src, client_addr)
+ self.assertEqual(rx[ICMP46][IP46error].dst, vip.ip)
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPUDPError].sport, sport)
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPUDPError].dport, vip.port)
+
+ self.vapi.cnat_session_purge()
+
+ def test_cnat6(self):
+ # """ CNat Translation ipv6 """
+ vips = [
+ Ep("30::1", 5555),
+ Ep("30::2", 5554),
+ Ep("30::2", 5553, UDP),
+ ]
+
+ self.pg0.generate_remote_hosts(len(vips))
+ self.pg0.configure_ipv6_neighbors()
+ self.pg1.generate_remote_hosts(len(vips))
+ self.pg1.configure_ipv6_neighbors()
+
+ self.cnat_translation(vips, isV6=True)
+
+ def test_cnat4(self):
+ # """ CNat Translation ipv4 """
+
+ vips = [
+ Ep("30.0.0.1", 5555),
+ Ep("30.0.0.2", 5554),
+ Ep("30.0.0.2", 5553, UDP),
+ ]
+
+ self.pg0.generate_remote_hosts(len(vips))
+ self.pg0.configure_ipv4_neighbors()
+ self.pg1.generate_remote_hosts(len(vips))
+ self.pg1.configure_ipv4_neighbors()
+
+ self.cnat_translation(vips)
+
+
+class TestCNatSourceNAT(VppTestCase):
+ """ CNat Source NAT """
+ extra_vpp_punt_config = ["cnat", "{",
+ "session-cleanup-timeout", "0.1",
+ "session-max-age", "1",
+ "tcp-max-age", "1",
+ "scanner", "off", "}"]
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCNatSourceNAT, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCNatSourceNAT, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestCNatSourceNAT, self).setUp()
+
+ self.create_pg_interfaces(range(3))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ self.pg0.configure_ipv6_neighbors()
+ self.pg0.configure_ipv4_neighbors()
+ self.pg1.generate_remote_hosts(2)
+ self.pg1.configure_ipv4_neighbors()
+ self.pg1.configure_ipv6_neighbors()
+
+ self.vapi.cnat_set_snat_addresses(
+ snat_ip4=self.pg2.remote_hosts[0].ip4,
+ snat_ip6=self.pg2.remote_hosts[0].ip6,
+ sw_if_index=INVALID_INDEX)
+ self.vapi.feature_enable_disable(
+ enable=1,
+ arc_name="ip6-unicast",
+ feature_name="cnat-snat-ip6",
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.feature_enable_disable(
+ enable=1,
+ arc_name="ip4-unicast",
+ feature_name="cnat-snat-ip4",
+ sw_if_index=self.pg0.sw_if_index)
+
+        policy_tbls = VppEnum.vl_api_cnat_snat_policy_table_t
+ self.vapi.cnat_set_snat_policy(
+ policy=VppEnum.vl_api_cnat_snat_policies_t.CNAT_POLICY_IF_PFX)
+ for i in self.pg_interfaces:
+ self.vapi.cnat_snat_policy_add_del_if(
+ sw_if_index=i.sw_if_index, is_add=1,
+                table=policy_tbls.CNAT_POLICY_INCLUDE_V6)
+ self.vapi.cnat_snat_policy_add_del_if(
+ sw_if_index=i.sw_if_index, is_add=1,
+                table=policy_tbls.CNAT_POLICY_INCLUDE_V4)
+
+ def tearDown(self):
+ self.vapi.cnat_session_purge()
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestCNatSourceNAT, self).tearDown()
+
+ def test_snat_v6(self):
+ # """ CNat Source Nat v6 """
+ self.sourcenat_test_tcp_udp_conf(TCP, isV6=True)
+ self.sourcenat_test_tcp_udp_conf(UDP, isV6=True)
+ self.sourcenat_test_icmp_err_conf(isV6=True)
+ self.sourcenat_test_icmp_echo6_conf()
+
+ def test_snat_v4(self):
+ # """ CNat Source Nat v4 """
+ self.sourcenat_test_tcp_udp_conf(TCP)
+ self.sourcenat_test_tcp_udp_conf(UDP)
+ self.sourcenat_test_icmp_err_conf()
+ self.sourcenat_test_icmp_echo4_conf()
+
+ def sourcenat_test_icmp_echo6_conf(self):
+ sports = [1234, 1235]
+ dports = [6661, 6662]
+
+ for nbr, remote_host in enumerate(self.pg1.remote_hosts):
+ client_addr = self.pg0.remote_hosts[0].ip6
+ remote_addr = self.pg1.remote_hosts[nbr].ip6
+ src_nat_addr = self.pg2.remote_hosts[0].ip6
+
+ # ping from pods to outside network
+ p1 = (
+ Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_hosts[0].mac) /
+ IPv6(src=client_addr, dst=remote_addr) /
+ ICMPv6EchoRequest(id=0xfeed) /
+ Raw())
+
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ for rx in rxs:
+ self.assertEqual(rx[IPv6].src, src_nat_addr)
+ self.assert_packet_checksums_valid(rx)
+
+ received_id = rx[0][ICMPv6EchoRequest].id
+ # ping reply from outside to pods
+ p2 = (
+ Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_hosts[nbr].mac) /
+ IPv6(src=remote_addr, dst=src_nat_addr) /
+ ICMPv6EchoReply(id=received_id))
+ rxs = self.send_and_expect(
+ self.pg1,
+ p2 * N_PKTS,
+ self.pg0)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IPv6].src, remote_addr)
+ self.assertEqual(rx[ICMPv6EchoReply].id, 0xfeed)
+
+ def sourcenat_test_icmp_echo4_conf(self):
+ sports = [1234, 1235]
+ dports = [6661, 6662]
+
+ for nbr, remote_host in enumerate(self.pg1.remote_hosts):
+ IP46 = IP
+ client_addr = self.pg0.remote_hosts[0].ip4
+ remote_addr = self.pg1.remote_hosts[nbr].ip4
+ src_nat_addr = self.pg2.remote_hosts[0].ip4
+
+ # ping from pods to outside network
+ p1 = (
+ Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_hosts[0].mac) /
+ IP46(src=client_addr, dst=remote_addr) /
+ ICMP(type=8, id=0xfeed) /
+ Raw())
+
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP46].src, src_nat_addr)
+ self.assert_packet_checksums_valid(rx)
+
+ received_id = rx[0][ICMP].id
+ # ping reply from outside to pods
+ p2 = (
+ Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_hosts[nbr].mac) /
+ IP46(src=remote_addr, dst=src_nat_addr) /
+ ICMP(type=0, id=received_id))
+ rxs = self.send_and_expect(
+ self.pg1,
+ p2 * N_PKTS,
+ self.pg0)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].src, remote_addr)
+ self.assertEqual(rx[ICMP].id, 0xfeed)
+
+ def sourcenat_test_icmp_err_conf(self, isV6=False):
+ sports = [1234, 1235]
+ dports = [6661, 6662]
+
+ for nbr, remote_host in enumerate(self.pg1.remote_hosts):
+ if isV6:
+ IP46 = IPv6
+ client_addr = self.pg0.remote_hosts[0].ip6
+ remote_addr = self.pg1.remote_hosts[nbr].ip6
+ src_nat_addr = self.pg2.remote_hosts[0].ip6
+ ICMP46 = ICMPv6DestUnreach
+ ICMPelem = ICMPv6DestUnreach(code=1)
+ IP46error = IPerror6
+ else:
+ IP46 = IP
+ client_addr = self.pg0.remote_hosts[0].ip4
+ remote_addr = self.pg1.remote_hosts[nbr].ip4
+ src_nat_addr = self.pg2.remote_hosts[0].ip4
+ IP46error = IPerror
+ ICMP46 = ICMP
+ ICMPelem = ICMP(type=11)
+
+ # from pods to outside network
+ p1 = (
+ Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_hosts[0].mac) /
+ IP46(src=client_addr, dst=remote_addr) /
+ TCP(sport=sports[nbr], dport=dports[nbr]) /
+ Raw())
+
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, remote_addr)
+ self.assertEqual(rx[TCP].dport, dports[nbr])
+ self.assertEqual(rx[IP46].src, src_nat_addr)
+ sport = rx[TCP].sport
+
+ InnerIP = rxs[0][IP46]
+ # from outside to pods, ICMP error
+ p2 = (
+ Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_hosts[nbr].mac) /
+ IP46(src=remote_addr, dst=src_nat_addr) /
+ ICMPelem / InnerIP)
+
+ rxs = self.send_and_expect(
+ self.pg1,
+ p2 * N_PKTS,
+ self.pg0)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].src, remote_addr)
+ self.assertEqual(rx[ICMP46][IP46error].src, client_addr)
+ self.assertEqual(rx[ICMP46][IP46error].dst, remote_addr)
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPerror].sport, sports[nbr])
+ self.assertEqual(rx[ICMP46][IP46error]
+ [TCPerror].dport, dports[nbr])
+
+ def sourcenat_test_tcp_udp_conf(self, l4p, isV6=False):
+ sports = [1234, 1235]
+ dports = [6661, 6662]
+
+ for nbr, remote_host in enumerate(self.pg1.remote_hosts):
+ if isV6:
+ IP46 = IPv6
+ client_addr = self.pg0.remote_hosts[0].ip6
+ remote_addr = self.pg1.remote_hosts[nbr].ip6
+ src_nat_addr = self.pg2.remote_hosts[0].ip6
+ exclude_prefix = ip_network(
+ "%s/100" % remote_addr, strict=False)
+ else:
+ IP46 = IP
+ client_addr = self.pg0.remote_hosts[0].ip4
+ remote_addr = self.pg1.remote_hosts[nbr].ip4
+ src_nat_addr = self.pg2.remote_hosts[0].ip4
+ exclude_prefix = ip_network(
+ "%s/16" % remote_addr, strict=False)
+ # from pods to outside network
+ p1 = (
+ Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_hosts[0].mac) /
+ IP46(src=client_addr, dst=remote_addr) /
+ l4p(sport=sports[nbr], dport=dports[nbr]) /
+ Raw())
+
+ self.vapi.cli("trace add pg-input 1")
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+ self.logger.info(self.vapi.cli("show trace max 1"))
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, remote_addr)
+ self.assertEqual(rx[l4p].dport, dports[nbr])
+ self.assertEqual(rx[IP46].src, src_nat_addr)
+ sport = rx[l4p].sport
+
+ # from outside to pods
+ p2 = (
+ Ether(dst=self.pg1.local_mac,
+ src=self.pg1.remote_hosts[nbr].mac) /
+ IP46(src=remote_addr, dst=src_nat_addr) /
+ l4p(sport=dports[nbr], dport=sport) /
+ Raw())
+
+ rxs = self.send_and_expect(
+ self.pg1,
+ p2 * N_PKTS,
+ self.pg0)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, client_addr)
+ self.assertEqual(rx[l4p].dport, sports[nbr])
+ self.assertEqual(rx[l4p].sport, dports[nbr])
+ self.assertEqual(rx[IP46].src, remote_addr)
+
+ # add remote host to exclude list
+ self.vapi.cnat_snat_policy_add_del_exclude_pfx(
+ prefix=exclude_prefix, is_add=1)
+ self.vapi.cnat_session_purge()
+
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, remote_addr)
+ self.assertEqual(rx[l4p].dport, dports[nbr])
+ self.assertEqual(rx[IP46].src, client_addr)
+
+ # remove remote host from exclude list
+ self.vapi.cnat_snat_policy_add_del_exclude_pfx(
+ prefix=exclude_prefix, is_add=0)
+ self.vapi.cnat_session_purge()
+
+ rxs = self.send_and_expect(
+ self.pg0,
+ p1 * N_PKTS,
+ self.pg1)
+
+ for rx in rxs:
+ self.assert_packet_checksums_valid(rx)
+ self.assertEqual(rx[IP46].dst, remote_addr)
+ self.assertEqual(rx[l4p].dport, dports[nbr])
+ self.assertEqual(rx[IP46].src, src_nat_addr)
+
+ self.vapi.cnat_session_purge()
+
+
+class TestCNatDHCP(VppTestCase):
+ """ CNat Translation """
+ extra_vpp_punt_config = ["cnat", "{",
+ "session-db-buckets", "64",
+ "session-cleanup-timeout", "0.1",
+ "session-max-age", "1",
+ "tcp-max-age", "1",
+ "scanner", "off", "}"]
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCNatDHCP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCNatDHCP, cls).tearDownClass()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.admin_down()
+ super(TestCNatDHCP, self).tearDown()
+
+ def create_translation(self, vip_pg, *args, is_v6=False):
+ vip = Ep(sw_if_index=vip_pg.sw_if_index, is_v6=is_v6)
+ paths = []
+ for (src_pg, dst_pg) in args:
+ paths.append(EpTuple(
+ Ep.from_pg(src_pg, is_v6=is_v6),
+ Ep.from_pg(dst_pg, is_v6=is_v6)
+ ))
+ t1 = VppCNatTranslation(self, TCP, vip, paths)
+ t1.add_vpp_config()
+ return t1
+
+ def make_addr(self, sw_if_index, i, is_v6):
+ if is_v6:
+ return "fd01:%x::%u" % (sw_if_index, i + 1)
+ else:
+ return "172.16.%u.%u" % (sw_if_index, i)
+
+ def make_prefix(self, sw_if_index, i, is_v6):
+ if is_v6:
+ return "%s/128" % self.make_addr(sw_if_index, i, is_v6)
+ else:
+ return "%s/32" % self.make_addr(sw_if_index, i, is_v6)
+
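+    # Example (illustration): with sw_if_index=1 and i=0 these helpers yield
+    # "172.16.1.0/32" for IPv4 and "fd01:1::1/128" for IPv6.
+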
+ def check_resolved(self, tr, vip_pg, *args, i=0, is_v6=False):
+ qt1 = tr.query_vpp_config()
+ self.assertEqual(str(qt1.vip.addr), self.make_addr(
+ vip_pg.sw_if_index, i, is_v6))
+ for (src_pg, dst_pg), path in zip(args, qt1.paths):
+ if src_pg:
+ self.assertEqual(str(path.src_ep.addr), self.make_addr(
+ src_pg.sw_if_index, i, is_v6))
+ if dst_pg:
+ self.assertEqual(str(path.dst_ep.addr), self.make_addr(
+ dst_pg.sw_if_index, i, is_v6))
+
+ def config_ips(self, rng, is_add=1, is_v6=False):
+ for pg, i in product(self.pg_interfaces, rng):
+ self.vapi.sw_interface_add_del_address(
+ sw_if_index=pg.sw_if_index,
+ prefix=self.make_prefix(pg.sw_if_index, i, is_v6),
+ is_add=is_add)
+
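+    # The tests below emulate DHCP-style address changes (sketch of intent):
+    # translations and SNAT addresses are bound to interfaces by sw_if_index
+    # only, interface addresses are then added/removed via config_ips(), and
+    # the configuration is expected to re-resolve to whatever address the
+    # interface currently holds.
+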
+ def test_dhcp_v4(self):
+ self.create_pg_interfaces(range(5))
+ for i in self.pg_interfaces:
+ i.admin_up()
+ pglist = (self.pg0, (self.pg1, self.pg2), (self.pg1, self.pg4))
+ t1 = self.create_translation(*pglist)
+ self.config_ips([0])
+ self.check_resolved(t1, *pglist)
+ self.config_ips([1])
+ self.config_ips([0], is_add=0)
+ self.check_resolved(t1, *pglist, i=1)
+ self.config_ips([1], is_add=0)
+ t1.remove_vpp_config()
+
+ def test_dhcp_v6(self):
+ self.create_pg_interfaces(range(5))
+ for i in self.pg_interfaces:
+ i.admin_up()
+ pglist = (self.pg0, (self.pg1, self.pg2), (self.pg1, self.pg4))
+ t1 = self.create_translation(*pglist, is_v6=True)
+ self.config_ips([0], is_v6=True)
+ self.check_resolved(t1, *pglist, is_v6=True)
+ self.config_ips([1], is_v6=True)
+ self.config_ips([0], is_add=0, is_v6=True)
+ self.check_resolved(t1, *pglist, i=1, is_v6=True)
+ self.config_ips([1], is_add=0, is_v6=True)
+ t1.remove_vpp_config()
+
+ def test_dhcp_snat(self):
+ self.create_pg_interfaces(range(1))
+ for i in self.pg_interfaces:
+ i.admin_up()
+ self.vapi.cnat_set_snat_addresses(sw_if_index=self.pg0.sw_if_index)
+ self.config_ips([0], is_v6=False)
+ self.config_ips([0], is_v6=True)
+ r = self.vapi.cnat_get_snat_addresses()
+ self.assertEqual(str(r.snat_ip4), self.make_addr(
+ self.pg0.sw_if_index, 0, False))
+ self.assertEqual(str(r.snat_ip6), self.make_addr(
+ self.pg0.sw_if_index, 0, True))
+ self.config_ips([1], is_v6=False)
+ self.config_ips([1], is_v6=True)
+ self.config_ips([0], is_add=0, is_v6=False)
+ self.config_ips([0], is_add=0, is_v6=True)
+ r = self.vapi.cnat_get_snat_addresses()
+ self.assertEqual(str(r.snat_ip4), self.make_addr(
+ self.pg0.sw_if_index, 1, False))
+ self.assertEqual(str(r.snat_ip6), self.make_addr(
+ self.pg0.sw_if_index, 1, True))
+ self.config_ips([1], is_add=0, is_v6=False)
+ self.config_ips([1], is_add=0, is_v6=True)
+ self.vapi.cnat_set_snat_addresses(sw_if_index=INVALID_INDEX)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_counters.py b/test/test_counters.py
new file mode 100644
index 00000000000..e4cb85621d0
--- /dev/null
+++ b/test/test_counters.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+
+from framework import VppTestCase
+from framework import tag_fixme_vpp_workers
+
+
+@tag_fixme_vpp_workers
+class TestCounters(VppTestCase):
+ """ Counters C Unit Tests """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCounters, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCounters, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestCounters, self).setUp()
+
+ def tearDown(self):
+ super(TestCounters, self).tearDown()
+
+ def test_counter_simple_expand(self):
+ """ Simple Counter Expand """
+ error = self.vapi.cli("test counter simple expand")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
+
+ def test_counter_combined_expand(self):
+ """ Combined Counter Expand """
+ error = self.vapi.cli("test counter combined expand")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('failed', error)
diff --git a/test/test_crypto.py b/test/test_crypto.py
new file mode 100644
index 00000000000..aa62dba1bab
--- /dev/null
+++ b/test/test_crypto.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+
+class TestCrypto(VppTestCase):
+ """ Crypto Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestCrypto, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestCrypto, cls).tearDownClass()
+
+ def test_crypto(self):
+ """ Crypto Unit Tests """
+ error = self.vapi.cli("test crypto")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("FAIL", error)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_det44.py b/test/test_det44.py
new file mode 100644
index 00000000000..ced77468959
--- /dev/null
+++ b/test/test_det44.py
@@ -0,0 +1,682 @@
+#!/usr/bin/env python3
+
+import socket
+import struct
+import unittest
+import scapy.compat
+from time import sleep
+from framework import VppTestCase, running_extended_tests
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet import IPerror, UDPerror
+from scapy.layers.l2 import Ether
+from util import ppp
+
+
+class TestDET44(VppTestCase):
+ """ Deterministic NAT Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDET44, cls).setUpClass()
+ cls.vapi.cli("set log class det44 level debug")
+
+ cls.tcp_port_in = 6303
+ cls.tcp_external_port = 6303
+ cls.udp_port_in = 6304
+ cls.udp_external_port = 6304
+ cls.icmp_id_in = 6305
+ cls.nat_addr = '10.0.0.3'
+
+ cls.create_pg_interfaces(range(3))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.pg0.generate_remote_hosts(2)
+ cls.pg0.configure_ipv4_neighbors()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDET44, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDET44, self).setUp()
+ self.vapi.det44_plugin_enable_disable(enable=1)
+
+ def tearDown(self):
+ super(TestDET44, self).tearDown()
+ if not self.vpp_dead:
+ self.vapi.det44_plugin_enable_disable(enable=0)
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show det44 interfaces"))
+ self.logger.info(self.vapi.cli("show det44 timeouts"))
+ self.logger.info(self.vapi.cli("show det44 mappings"))
+ self.logger.info(self.vapi.cli("show det44 sessions"))
+
+ def verify_capture_in(self, capture, in_if):
+ """
+ Verify captured packets on inside network
+
+ :param capture: Captured packets
+ :param in_if: Inside interface
+ """
+ fired = False
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP].dst, in_if.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+            except Exception:
+ fired = True
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ if fired:
+ raise
+
+ def verify_ipfix_max_entries_per_user(self, data, limit, src_addr):
+ """
+ Verify IPFIX maximum entries per user exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Number of maximum entries per user
+ :param src_addr: IPv4 source address
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(scapy.compat.orb(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 3), record[466])
+ # maxEntriesPerUser
+ self.assertEqual(struct.pack("I", limit), record[473])
+ # sourceIPv4Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET, src_addr), record[8])
+
+ def initiate_tcp_session(self, in_if, out_if):
+ """
+        Initiate a TCP session (3-way handshake)
+
+ :param in_if: Inside interface
+ :param out_if: Outside interface
+ """
+
+ # SYN packet in->out
+ p = (Ether(src=in_if.remote_mac, dst=in_if.local_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="S"))
+ in_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = out_if.get_capture(1)
+ p = capture[0]
+ self.tcp_port_out = p[TCP].sport
+
+ # SYN + ACK packet out->in
+ p = (Ether(src=out_if.remote_mac, dst=out_if.local_mac) /
+ IP(src=out_if.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out,
+ flags="SA"))
+ out_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ in_if.get_capture(1)
+
+ # ACK packet in->out
+ p = (Ether(src=in_if.remote_mac, dst=in_if.local_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="A"))
+ in_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ out_if.get_capture(1)
+
+ def create_stream_in(self, in_if, out_if, ttl=64):
+ """
+ Create packet stream for inside network
+
+ :param in_if: Inside interface
+ :param out_if: Outside interface
+ :param ttl: TTL of generated packets
+ """
+ pkts = []
+ # TCP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4, ttl=ttl) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4, ttl=ttl) /
+ UDP(sport=self.udp_port_in, dport=self.udp_external_port))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4, ttl=ttl) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_out(self, out_if, dst_ip=None, ttl=64):
+ """
+ Create packet stream for outside network
+
+ :param out_if: Outside interface
+        :param dst_ip: Destination IP address (default: global NAT address)
+ :param ttl: TTL of generated packets
+ """
+ if dst_ip is None:
+ dst_ip = self.nat_addr
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(dport=self.tcp_port_out, sport=self.tcp_external_port))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(dport=self.udp_port_out, sport=self.udp_external_port))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=self.icmp_external_id, type='echo-reply'))
+ pkts.append(p)
+
+ return pkts
+
+ def verify_capture_out(self, capture, nat_ip=None):
+ """
+ Verify captured packets on outside network
+
+ :param capture: Captured packets
+        :param nat_ip: Translated IP address (default: global NAT address)
+ """
+ if nat_ip is None:
+ nat_ip = self.nat_addr
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, nat_ip)
+ if packet.haslayer(TCP):
+ self.tcp_port_out = packet[TCP].sport
+ elif packet.haslayer(UDP):
+ self.udp_port_out = packet[UDP].sport
+ else:
+ self.icmp_external_id = packet[ICMP].id
+            except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def test_deterministic_mode(self):
+ """ NAT plugin run deterministic mode """
+ in_addr = '172.16.255.0'
+ out_addr = '172.17.255.50'
+ in_addr_t = '172.16.255.20'
+ in_plen = 24
+ out_plen = 32
+
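+        # Deterministic NAT expectation (sketch, not the full algorithm):
+        # every host in the /24 inside prefix maps algorithmically onto the
+        # single /32 outside address, each owning a fixed slice of the outside
+        # port range, so det44_forward() and det44_reverse() below must be
+        # exact inverses of each other.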
+ self.vapi.det44_add_del_map(is_add=1, in_addr=in_addr,
+ in_plen=in_plen, out_addr=out_addr,
+ out_plen=out_plen)
+
+ rep1 = self.vapi.det44_forward(in_addr_t)
+ self.assertEqual(str(rep1.out_addr), out_addr)
+ rep2 = self.vapi.det44_reverse(rep1.out_port_hi, out_addr)
+
+ self.assertEqual(str(rep2.in_addr), in_addr_t)
+
+ deterministic_mappings = self.vapi.det44_map_dump()
+ self.assertEqual(len(deterministic_mappings), 1)
+ dsm = deterministic_mappings[0]
+ self.assertEqual(in_addr, str(dsm.in_addr))
+ self.assertEqual(in_plen, dsm.in_plen)
+ self.assertEqual(out_addr, str(dsm.out_addr))
+ self.assertEqual(out_plen, dsm.out_plen)
+
+ def test_set_timeouts(self):
+ """ Set deterministic NAT timeouts """
+ timeouts_before = self.vapi.det44_get_timeouts()
+
+ self.vapi.det44_set_timeouts(
+ udp=timeouts_before.udp + 10,
+ tcp_established=timeouts_before.tcp_established + 10,
+ tcp_transitory=timeouts_before.tcp_transitory + 10,
+ icmp=timeouts_before.icmp + 10)
+
+ timeouts_after = self.vapi.det44_get_timeouts()
+
+ self.assertNotEqual(timeouts_before.udp, timeouts_after.udp)
+ self.assertNotEqual(timeouts_before.icmp, timeouts_after.icmp)
+ self.assertNotEqual(timeouts_before.tcp_established,
+ timeouts_after.tcp_established)
+ self.assertNotEqual(timeouts_before.tcp_transitory,
+ timeouts_after.tcp_transitory)
+
+ def test_in(self):
+ """ DET44 translation test (TCP, UDP, ICMP) """
+
+ nat_ip = "10.0.0.10"
+
+ self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
+ in_plen=32,
+ out_addr=socket.inet_aton(nat_ip),
+ out_plen=32)
+
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, nat_ip)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # session dump test
+ sessions = self.vapi.det44_session_dump(self.pg0.remote_ip4)
+ self.assertEqual(len(sessions), 3)
+
+ # TCP session
+ s = sessions[0]
+ self.assertEqual(str(s.ext_addr), self.pg1.remote_ip4)
+ self.assertEqual(s.in_port, self.tcp_port_in)
+ self.assertEqual(s.out_port, self.tcp_port_out)
+ self.assertEqual(s.ext_port, self.tcp_external_port)
+
+ # UDP session
+ s = sessions[1]
+ self.assertEqual(str(s.ext_addr), self.pg1.remote_ip4)
+ self.assertEqual(s.in_port, self.udp_port_in)
+ self.assertEqual(s.out_port, self.udp_port_out)
+ self.assertEqual(s.ext_port, self.udp_external_port)
+
+ # ICMP session
+ s = sessions[2]
+ self.assertEqual(str(s.ext_addr), self.pg1.remote_ip4)
+ self.assertEqual(s.in_port, self.icmp_id_in)
+ self.assertEqual(s.out_port, self.icmp_external_id)
+
+ def test_multiple_users(self):
+ """ Deterministic NAT multiple users """
+
+ nat_ip = "10.0.0.10"
+ port_in = 80
+ external_port = 6303
+
+ host0 = self.pg0.remote_hosts[0]
+ host1 = self.pg0.remote_hosts[1]
+
+ self.vapi.det44_add_del_map(is_add=1, in_addr=host0.ip4, in_plen=24,
+ out_addr=socket.inet_aton(nat_ip),
+ out_plen=32)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+
+ # host0 to out
+ p = (Ether(src=host0.mac, dst=self.pg0.local_mac) /
+ IP(src=host0.ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=port_in, dport=external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, nat_ip)
+ self.assertEqual(ip.dst, self.pg1.remote_ip4)
+ self.assertEqual(tcp.dport, external_port)
+ port_out0 = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # host1 to out
+ p = (Ether(src=host1.mac, dst=self.pg0.local_mac) /
+ IP(src=host1.ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=port_in, dport=external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, nat_ip)
+ self.assertEqual(ip.dst, self.pg1.remote_ip4)
+ self.assertEqual(tcp.dport, external_port)
+ port_out1 = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(1, len(dms))
+ self.assertEqual(2, dms[0].ses_num)
+
+ # out to host0
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=nat_ip) /
+ TCP(sport=external_port, dport=port_out0))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg1.remote_ip4)
+ self.assertEqual(ip.dst, host0.ip4)
+ self.assertEqual(tcp.dport, port_in)
+ self.assertEqual(tcp.sport, external_port)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # out to host1
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=nat_ip) /
+ TCP(sport=external_port, dport=port_out1))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg1.remote_ip4)
+ self.assertEqual(ip.dst, host1.ip4)
+ self.assertEqual(tcp.dport, port_in)
+ self.assertEqual(tcp.sport, external_port)
+ except:
+            self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # session close api test
+ self.vapi.det44_close_session_out(socket.inet_aton(nat_ip),
+ port_out1,
+ self.pg1.remote_ip4,
+ external_port)
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(dms[0].ses_num, 1)
+
+ self.vapi.det44_close_session_in(host0.ip4,
+ port_in,
+ self.pg1.remote_ip4,
+ external_port)
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(dms[0].ses_num, 0)
+
+ def test_tcp_session_close_detection_in(self):
+ """ DET44 TCP session close from inside network """
+ self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
+ in_plen=32,
+ out_addr=socket.inet_aton(self.nat_addr),
+ out_plen=32)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+
+ self.initiate_tcp_session(self.pg0, self.pg1)
+
+ # close the session from inside
+ try:
+ # FIN packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="F"))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ pkts = []
+
+ # ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out,
+ flags="A"))
+ pkts.append(p)
+
+ # FIN packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out,
+ flags="F"))
+ pkts.append(p)
+
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(2)
+
+ # ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="A"))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # Check if deterministic NAT44 closed the session
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(0, dms[0].ses_num)
+ except:
+ self.logger.error("TCP session termination failed")
+ raise
+
+ def test_tcp_session_close_detection_out(self):
+ """ Deterministic NAT TCP session close from outside network """
+ self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
+ in_plen=32,
+ out_addr=socket.inet_aton(self.nat_addr),
+ out_plen=32)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+
+ self.initiate_tcp_session(self.pg0, self.pg1)
+
+ # close the session from outside
+ try:
+ # FIN packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out,
+ flags="F"))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ pkts = []
+
+ # ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="A"))
+ pkts.append(p)
+
+            # FIN packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="F"))
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(2)
+
+ # ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out,
+ flags="A"))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ # Check if deterministic NAT44 closed the session
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(0, dms[0].ses_num)
+ except:
+ self.logger.error("TCP session termination failed")
+ raise
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_session_timeout(self):
+ """ Deterministic NAT session timeouts """
+ self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
+ in_plen=32,
+ out_addr=socket.inet_aton(self.nat_addr),
+ out_plen=32)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+
+ self.initiate_tcp_session(self.pg0, self.pg1)
+ self.vapi.det44_set_timeouts(udp=5, tcp_established=5,
+ tcp_transitory=5, icmp=5)
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(len(pkts))
+ sleep(15)
+
+ dms = self.vapi.det44_map_dump()
+ self.assertEqual(0, dms[0].ses_num)
+
+ # TODO: ipfix needs to be separated from NAT base plugin
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_session_limit_per_user(self):
+ """ Deterministic NAT maximum sessions per user limit """
+ self.vapi.det44_add_del_map(is_add=1, in_addr=self.pg0.remote_ip4,
+ in_plen=32,
+ out_addr=socket.inet_aton(self.nat_addr),
+ out_plen=32)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, is_inside=1)
+ self.vapi.det44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1, is_inside=0)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg2.remote_ip4,
+ src_address=self.pg2.local_ip4,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix_enable_disable(domain_id=1, src_port=4739,
+ enable=1)
+
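+        # open 1000 UDP sessions (ports 1025-2024) to reach the per-user
+        # session limit used in this test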
+ pkts = []
+ for port in range(1025, 2025):
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(sport=port, dport=port))
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(len(pkts))
+
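+        # one more flow from the same user should be refused and answered
+        # with an ICMP destination-unreachable error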
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(sport=3001, dport=3002))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ # verify ICMP error packet
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ self.assertTrue(p.haslayer(ICMP))
+ icmp = p[ICMP]
+ self.assertEqual(icmp.type, 3)
+ self.assertEqual(icmp.code, 1)
+ self.assertTrue(icmp.haslayer(IPerror))
+ inner_ip = icmp[IPerror]
+ self.assertEqual(inner_ip[UDPerror].sport, 3001)
+ self.assertEqual(inner_ip[UDPerror].dport, 3002)
+
+ dms = self.vapi.det44_map_dump()
+
+ self.assertEqual(1000, dms[0].ses_num)
+
+ # verify IPFIX logging
+ self.vapi.ipfix_flush()
+ sleep(1)
+ capture = self.pg2.get_capture(2)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_entries_per_user(data,
+ 1000,
+ self.pg0.remote_ip4)
+ self.vapi.nat_ipfix_enable_disable(domain_id=1, src_port=4739,
+ enable=0)
diff --git a/test/test_dhcp.py b/test/test_dhcp.py
new file mode 100644
index 00000000000..e17b0049df7
--- /dev/null
+++ b/test/test_dhcp.py
@@ -0,0 +1,1686 @@
+#!/usr/bin/env python3
+
+import unittest
+import socket
+import struct
+import six
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from framework import tag_run_solo
+from vpp_neighbor import VppNeighbor
+from vpp_ip_route import find_route, VppIpTable
+from util import mk_ll_addr
+import scapy.compat
+from scapy.layers.l2 import Ether, getmacbyip, ARP, Dot1Q
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6, in6_getnsmac
+from scapy.utils6 import in6_mactoifaceid
+from scapy.layers.dhcp import DHCP, BOOTP, DHCPTypes
+from scapy.layers.dhcp6 import DHCP6, DHCP6_Solicit, DHCP6_RelayForward, \
+ DHCP6_RelayReply, DHCP6_Advertise, DHCP6OptRelayMsg, DHCP6OptIfaceId, \
+ DHCP6OptStatusCode, DHCP6OptVSS, DHCP6OptClientLinkLayerAddr, DHCP6_Request
+from socket import AF_INET, AF_INET6, inet_pton, inet_ntop
+from scapy.utils6 import in6_ptop
+from vpp_papi import mac_pton, VppEnum
+from vpp_sub_interface import VppDot1QSubint
+from vpp_qos import VppQosEgressMap, VppQosMark
+from vpp_dhcp import VppDHCPClient, VppDHCPProxy
+
+
+DHCP4_CLIENT_PORT = 68
+DHCP4_SERVER_PORT = 67
+DHCP6_CLIENT_PORT = 547
+DHCP6_SERVER_PORT = 546
+
+
+@tag_run_solo
+class TestDHCP(VppTestCase):
+ """ DHCP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDHCP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDHCP, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDHCP, self).setUp()
+
+ # create 6 pg interfaces for pg0 to pg5
+ self.create_pg_interfaces(range(6))
+ self.tables = []
+
+ # pg0 to 2 are IP configured in VRF 0, 1 and 2.
+ # pg3 to 5 are non IP-configured in VRF 0, 1 and 2.
+ table_id = 0
+ for table_id in range(1, 4):
+ tbl4 = VppIpTable(self, table_id)
+ tbl4.add_vpp_config()
+ self.tables.append(tbl4)
+ tbl6 = VppIpTable(self, table_id, is_ip6=1)
+ tbl6.add_vpp_config()
+ self.tables.append(tbl6)
+
+ table_id = 0
+ for i in self.pg_interfaces[:3]:
+ i.admin_up()
+ i.set_table_ip4(table_id)
+ i.set_table_ip6(table_id)
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+ table_id += 1
+
+ table_id = 0
+ for i in self.pg_interfaces[3:]:
+ i.admin_up()
+ i.set_table_ip4(table_id)
+ i.set_table_ip6(table_id)
+ table_id += 1
+
+ def tearDown(self):
+ for i in self.pg_interfaces[:3]:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+
+ for i in self.pg_interfaces:
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+ i.admin_down()
+ super(TestDHCP, self).tearDown()
+
+ def verify_dhcp_has_option(self, pkt, option, value):
+ dhcp = pkt[DHCP]
+ found = False
+
+ for i in dhcp.options:
+ if isinstance(i, tuple):
+ if i[0] == option:
+ self.assertEqual(i[1], value)
+ found = True
+
+ self.assertTrue(found)
+
+ def validate_relay_options(self, pkt, intf, ip_addr, vpn_id, fib_id, oui):
+ dhcp = pkt[DHCP]
+ found = 0
+ data = []
+ id_len = len(vpn_id)
+
+ for i in dhcp.options:
+ if isinstance(i, tuple):
+ if i[0] == "relay_agent_Information":
+ #
+                    # There are two sub-options present - each of length 6.
+ #
+ data = i[1]
+ if oui != 0:
+ self.assertEqual(len(data), 24)
+ elif len(vpn_id) > 0:
+ self.assertEqual(len(data), len(vpn_id) + 17)
+ else:
+ self.assertEqual(len(data), 12)
+
+ #
+ # First sub-option is ID 1, len 4, then encoded
+                    #  sw_if_index. This test uses low-valued indices
+                    # so [2:4] are 0.
+                    # The ID space is VPP internal - so there is no
+                    # matching value in scapy.
+ #
+ self.assertEqual(six.byte2int(data[0:1]), 1)
+ self.assertEqual(six.byte2int(data[1:2]), 4)
+ self.assertEqual(six.byte2int(data[2:3]), 0)
+ self.assertEqual(six.byte2int(data[3:4]), 0)
+ self.assertEqual(six.byte2int(data[4:5]), 0)
+ self.assertEqual(six.byte2int(data[5:6]),
+ intf._sw_if_index)
+
+ #
+ # next sub-option is the IP address of the client side
+ # interface.
+ # sub-option ID=5, length (of a v4 address)=4
+ #
+ claddr = socket.inet_pton(AF_INET, ip_addr)
+
+ self.assertEqual(six.byte2int(data[6:7]), 5)
+ self.assertEqual(six.byte2int(data[7:8]), 4)
+ self.assertEqual(data[8], claddr[0])
+ self.assertEqual(data[9], claddr[1])
+ self.assertEqual(data[10], claddr[2])
+ self.assertEqual(data[11], claddr[3])
+
+ if oui != 0:
+ # sub-option 151 encodes vss_type 1,
+ # the 3 byte oui and the 4 byte fib_id
+ self.assertEqual(id_len, 0)
+ self.assertEqual(six.byte2int(data[12:13]), 151)
+ self.assertEqual(six.byte2int(data[13:14]), 8)
+ self.assertEqual(six.byte2int(data[14:15]), 1)
+ self.assertEqual(six.byte2int(data[15:16]), 0)
+ self.assertEqual(six.byte2int(data[16:17]), 0)
+ self.assertEqual(six.byte2int(data[17:18]), oui)
+ self.assertEqual(six.byte2int(data[18:19]), 0)
+ self.assertEqual(six.byte2int(data[19:20]), 0)
+ self.assertEqual(six.byte2int(data[20:21]), 0)
+ self.assertEqual(six.byte2int(data[21:22]), fib_id)
+
+ # VSS control sub-option
+ self.assertEqual(six.byte2int(data[22:23]), 152)
+ self.assertEqual(six.byte2int(data[23:24]), 0)
+
+ if id_len > 0:
+                        # sub-option 151 encodes vss_type 0
+                        # followed by the vpn_id in ascii
+ self.assertEqual(oui, 0)
+ self.assertEqual(six.byte2int(data[12:13]), 151)
+ self.assertEqual(six.byte2int(data[13:14]), id_len + 1)
+ self.assertEqual(six.byte2int(data[14:15]), 0)
+ self.assertEqual(data[15:15 + id_len].decode('ascii'),
+ vpn_id)
+
+ # VSS control sub-option
+ self.assertEqual(six.byte2int(data[15 + len(vpn_id):
+ 16 + len(vpn_id)]),
+ 152)
+ self.assertEqual(six.byte2int(data[16 + len(vpn_id):
+ 17 + len(vpn_id)]),
+ 0)
+
+ found = 1
+ self.assertTrue(found)
+
+ return data
+
+ def verify_dhcp_msg_type(self, pkt, name):
+ dhcp = pkt[DHCP]
+ found = False
+ for o in dhcp.options:
+ if isinstance(o, tuple):
+ if o[0] == "message-type" \
+ and DHCPTypes[o[1]] == name:
+ found = True
+ self.assertTrue(found)
+
+ def verify_dhcp_offer(self, pkt, intf, vpn_id="", fib_id=0, oui=0):
+ ether = pkt[Ether]
+ self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+ self.assertEqual(ether.src, intf.local_mac)
+
+ ip = pkt[IP]
+ self.assertEqual(ip.dst, "255.255.255.255")
+ self.assertEqual(ip.src, intf.local_ip4)
+
+ udp = pkt[UDP]
+ self.assertEqual(udp.dport, DHCP4_CLIENT_PORT)
+ self.assertEqual(udp.sport, DHCP4_SERVER_PORT)
+
+ self.verify_dhcp_msg_type(pkt, "offer")
+ data = self.validate_relay_options(pkt, intf, intf.local_ip4,
+ vpn_id, fib_id, oui)
+
+ def verify_orig_dhcp_pkt(self, pkt, intf, dscp, l2_bc=True):
+ ether = pkt[Ether]
+ if l2_bc:
+ self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+ else:
+ self.assertEqual(ether.dst, intf.remote_mac)
+ self.assertEqual(ether.src, intf.local_mac)
+
+ ip = pkt[IP]
+
+ if (l2_bc):
+ self.assertEqual(ip.dst, "255.255.255.255")
+ self.assertEqual(ip.src, "0.0.0.0")
+ else:
+ self.assertEqual(ip.dst, intf.remote_ip4)
+ self.assertEqual(ip.src, intf.local_ip4)
+ self.assertEqual(ip.tos, dscp)
+
+ udp = pkt[UDP]
+ self.assertEqual(udp.dport, DHCP4_SERVER_PORT)
+ self.assertEqual(udp.sport, DHCP4_CLIENT_PORT)
+
+ def verify_orig_dhcp_discover(self, pkt, intf, hostname, client_id=None,
+ broadcast=True, dscp=0):
+ self.verify_orig_dhcp_pkt(pkt, intf, dscp)
+
+ self.verify_dhcp_msg_type(pkt, "discover")
+ self.verify_dhcp_has_option(pkt, "hostname",
+ hostname.encode('ascii'))
+ if client_id:
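+            # the client-id option carries a type byte ahead of the ascii
+            # id; the client is expected to send type 0, so prepend it here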
+ client_id = '\x00' + client_id
+ self.verify_dhcp_has_option(pkt, "client_id",
+ client_id.encode('ascii'))
+ bootp = pkt[BOOTP]
+ self.assertEqual(bootp.ciaddr, "0.0.0.0")
+ self.assertEqual(bootp.giaddr, "0.0.0.0")
+ if broadcast:
+ self.assertEqual(bootp.flags, 0x8000)
+ else:
+ self.assertEqual(bootp.flags, 0x0000)
+
+ def verify_orig_dhcp_request(self, pkt, intf, hostname, ip,
+ broadcast=True,
+ l2_bc=True,
+ dscp=0):
+ self.verify_orig_dhcp_pkt(pkt, intf, dscp, l2_bc=l2_bc)
+
+ self.verify_dhcp_msg_type(pkt, "request")
+ self.verify_dhcp_has_option(pkt, "hostname",
+ hostname.encode('ascii'))
+ self.verify_dhcp_has_option(pkt, "requested_addr", ip)
+ bootp = pkt[BOOTP]
+
+ if l2_bc:
+ self.assertEqual(bootp.ciaddr, "0.0.0.0")
+ else:
+ self.assertEqual(bootp.ciaddr, intf.local_ip4)
+ self.assertEqual(bootp.giaddr, "0.0.0.0")
+
+ if broadcast:
+ self.assertEqual(bootp.flags, 0x8000)
+ else:
+ self.assertEqual(bootp.flags, 0x0000)
+
+ def verify_relayed_dhcp_discover(self, pkt, intf, src_intf=None,
+ fib_id=0, oui=0,
+ vpn_id="",
+ dst_mac=None, dst_ip=None):
+ if not dst_mac:
+ dst_mac = intf.remote_mac
+ if not dst_ip:
+ dst_ip = intf.remote_ip4
+
+ ether = pkt[Ether]
+ self.assertEqual(ether.dst, dst_mac)
+ self.assertEqual(ether.src, intf.local_mac)
+
+ ip = pkt[IP]
+ self.assertEqual(ip.dst, dst_ip)
+ self.assertEqual(ip.src, intf.local_ip4)
+
+ udp = pkt[UDP]
+ self.assertEqual(udp.dport, DHCP4_SERVER_PORT)
+ self.assertEqual(udp.sport, DHCP4_CLIENT_PORT)
+
+ dhcp = pkt[DHCP]
+
+ is_discover = False
+ for o in dhcp.options:
+ if isinstance(o, tuple):
+ if o[0] == "message-type" \
+ and DHCPTypes[o[1]] == "discover":
+ is_discover = True
+ self.assertTrue(is_discover)
+
+ data = self.validate_relay_options(pkt, src_intf,
+ src_intf.local_ip4,
+ vpn_id,
+ fib_id, oui)
+ return data
+
+ def verify_dhcp6_solicit(self, pkt, intf,
+ peer_ip, peer_mac,
+ vpn_id="",
+ fib_id=0,
+ oui=0,
+ dst_mac=None,
+ dst_ip=None):
+ if not dst_mac:
+ dst_mac = intf.remote_mac
+ if not dst_ip:
+ dst_ip = in6_ptop(intf.remote_ip6)
+
+ ether = pkt[Ether]
+ self.assertEqual(ether.dst, dst_mac)
+ self.assertEqual(ether.src, intf.local_mac)
+
+ ip = pkt[IPv6]
+ self.assertEqual(in6_ptop(ip.dst), dst_ip)
+ self.assertEqual(in6_ptop(ip.src), in6_ptop(intf.local_ip6))
+
+ udp = pkt[UDP]
+ self.assertEqual(udp.dport, DHCP6_CLIENT_PORT)
+ self.assertEqual(udp.sport, DHCP6_SERVER_PORT)
+
+ relay = pkt[DHCP6_RelayForward]
+ self.assertEqual(in6_ptop(relay.peeraddr), in6_ptop(peer_ip))
+ oid = pkt[DHCP6OptIfaceId]
+ cll = pkt[DHCP6OptClientLinkLayerAddr]
+ self.assertEqual(cll.optlen, 8)
+ self.assertEqual(cll.lltype, 1)
+ self.assertEqual(cll.clladdr, peer_mac)
+
+ id_len = len(vpn_id)
+
+ if fib_id != 0:
+ self.assertEqual(id_len, 0)
+ vss = pkt[DHCP6OptVSS]
+ self.assertEqual(vss.optlen, 8)
+ self.assertEqual(vss.type, 1)
+ # the OUI and FIB-id are really 3 and 4 bytes resp.
+ # but the tested range is small
+ self.assertEqual(six.byte2int(vss.data[0:1]), 0)
+ self.assertEqual(six.byte2int(vss.data[1:2]), 0)
+ self.assertEqual(six.byte2int(vss.data[2:3]), oui)
+ self.assertEqual(six.byte2int(vss.data[3:4]), 0)
+ self.assertEqual(six.byte2int(vss.data[4:5]), 0)
+ self.assertEqual(six.byte2int(vss.data[5:6]), 0)
+ self.assertEqual(six.byte2int(vss.data[6:7]), fib_id)
+
+ if id_len > 0:
+ self.assertEqual(oui, 0)
+ vss = pkt[DHCP6OptVSS]
+ self.assertEqual(vss.optlen, id_len + 1)
+ self.assertEqual(vss.type, 0)
+ self.assertEqual(vss.data[0:id_len].decode('ascii'),
+ vpn_id)
+
+ # the relay message should be an encoded Solicit
+ msg = pkt[DHCP6OptRelayMsg]
+ sol = DHCP6_Solicit()
+ self.assertEqual(msg.optlen, len(sol))
+ self.assertEqual(sol, msg[1])
+
+ def verify_dhcp6_advert(self, pkt, intf, peer):
+ ether = pkt[Ether]
+ self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff")
+ self.assertEqual(ether.src, intf.local_mac)
+
+ ip = pkt[IPv6]
+ self.assertEqual(in6_ptop(ip.dst), in6_ptop(peer))
+ self.assertEqual(in6_ptop(ip.src), in6_ptop(intf.local_ip6))
+
+ udp = pkt[UDP]
+ self.assertEqual(udp.dport, DHCP6_SERVER_PORT)
+ self.assertEqual(udp.sport, DHCP6_CLIENT_PORT)
+
+ # not sure why this is not decoding
+ # adv = pkt[DHCP6_Advertise]
+
+ def wait_for_no_route(self, address, length,
+ n_tries=50, s_time=1):
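+        # poll the FIB until the route disappears or the retries run out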
+ while (n_tries):
+ if not find_route(self, address, length):
+ return True
+ n_tries = n_tries - 1
+ self.sleep(s_time)
+
+ return False
+
+ def test_dhcp_proxy(self):
+ """ DHCPv4 Proxy """
+
+ #
+ # Verify no response to DHCP request without DHCP config
+ #
+ p_disc_vrf0 = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg3.remote_mac) /
+ IP(src="0.0.0.0", dst="255.255.255.255") /
+ UDP(sport=DHCP4_CLIENT_PORT,
+ dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'discover'), ('end')]))
+ pkts_disc_vrf0 = [p_disc_vrf0]
+ p_disc_vrf1 = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg4.remote_mac) /
+ IP(src="0.0.0.0", dst="255.255.255.255") /
+ UDP(sport=DHCP4_CLIENT_PORT,
+ dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'discover'), ('end')]))
+ pkts_disc_vrf1 = [p_disc_vrf1]
+ p_disc_vrf2 = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg5.remote_mac) /
+ IP(src="0.0.0.0", dst="255.255.255.255") /
+ UDP(sport=DHCP4_CLIENT_PORT,
+ dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'discover'), ('end')]))
+ pkts_disc_vrf2 = [p_disc_vrf2]
+
+ self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf0,
+ "DHCP with no configuration")
+ self.send_and_assert_no_replies(self.pg4, pkts_disc_vrf1,
+ "DHCP with no configuration")
+ self.send_and_assert_no_replies(self.pg5, pkts_disc_vrf2,
+ "DHCP with no configuration")
+
+ #
+ # Enable DHCP proxy in VRF 0
+ #
+ server_addr = self.pg0.remote_ip4
+ src_addr = self.pg0.local_ip4
+
+ Proxy = VppDHCPProxy(self, server_addr, src_addr, rx_vrf_id=0)
+ Proxy.add_vpp_config()
+
+ #
+ # Discover packets from the client are dropped because there is no
+ # IP address configured on the client facing interface
+ #
+ self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf0,
+ "Discover DHCP no relay address")
+
+ #
+ # Inject a response from the server
+        #  dropped, because there is no IP address on the
+        #  client interface to fill in the option.
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'), ('end')]))
+ pkts = [p]
+
+ self.send_and_assert_no_replies(self.pg3, pkts,
+ "Offer DHCP no relay address")
+
+ #
+ # configure an IP address on the client facing interface
+ #
+ self.pg3.config_ip4()
+
+ #
+ # Try again with a discover packet
+ # Rx'd packet should be to the server address and from the configured
+ # source address
+ # UDP source ports are unchanged
+ # we've no option 82 config so that should be absent
+ #
+ self.pg3.add_stream(pkts_disc_vrf0)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(1)
+ rx = rx[0]
+
+ option_82 = self.verify_relayed_dhcp_discover(rx, self.pg0,
+ src_intf=self.pg3)
+
+ #
+        # Create a DHCP offer reply from the server with a correctly formatted
+ # option 82. i.e. send back what we just captured
+ # The offer, sent mcast to the client, still has option 82.
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', option_82),
+ ('end')]))
+ pkts = [p]
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+ rx = rx[0]
+
+ self.verify_dhcp_offer(rx, self.pg3)
+
+ #
+ # Bogus Option 82:
+ #
+ # 1. not our IP address = not checked by VPP? so offer is replayed
+ # to client
+ bad_ip = option_82[0:8] + scapy.compat.chb(33) + option_82[9:]
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', bad_ip),
+ ('end')]))
+ pkts = [p]
+ self.send_and_assert_no_replies(self.pg0, pkts,
+ "DHCP offer option 82 bad address")
+
+ # 2. Not a sw_if_index VPP knows
+ bad_if_index = option_82[0:2] + scapy.compat.chb(33) + option_82[3:]
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', bad_if_index),
+ ('end')]))
+ pkts = [p]
+ self.send_and_assert_no_replies(self.pg0, pkts,
+ "DHCP offer option 82 bad if index")
+
+ #
+ # Send a DHCP request in VRF 1. should be dropped.
+ #
+ self.send_and_assert_no_replies(self.pg4, pkts_disc_vrf1,
+ "DHCP with no configuration VRF 1")
+
+ #
+ # Delete the DHCP config in VRF 0
+ # Should now drop requests.
+ #
+ Proxy.remove_vpp_config()
+
+ self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf0,
+ "DHCP config removed VRF 0")
+ self.send_and_assert_no_replies(self.pg4, pkts_disc_vrf1,
+ "DHCP config removed VRF 1")
+
+ #
+ # Add DHCP config for VRF 1 & 2
+ #
+ server_addr1 = self.pg1.remote_ip4
+ src_addr1 = self.pg1.local_ip4
+ Proxy1 = VppDHCPProxy(
+ self,
+ server_addr1,
+ src_addr1,
+ rx_vrf_id=1,
+ server_vrf_id=1)
+ Proxy1.add_vpp_config()
+
+ server_addr2 = self.pg2.remote_ip4
+ src_addr2 = self.pg2.local_ip4
+ Proxy2 = VppDHCPProxy(
+ self,
+ server_addr2,
+ src_addr2,
+ rx_vrf_id=2,
+ server_vrf_id=2)
+ Proxy2.add_vpp_config()
+
+ #
+        # Confirm DHCP requests in VRF 1 & 2 are still dropped
+        #  - there is no IP config on the client interfaces yet
+ #
+ self.send_and_assert_no_replies(self.pg4, pkts_disc_vrf1,
+ "DHCP config removed VRF 1")
+ self.send_and_assert_no_replies(self.pg5, pkts_disc_vrf2,
+ "DHCP config removed VRF 2")
+
+ #
+ # configure an IP address on the client facing interface
+ #
+ self.pg4.config_ip4()
+ self.pg4.add_stream(pkts_disc_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = self.pg1.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg1, src_intf=self.pg4)
+
+ self.pg5.config_ip4()
+ self.pg5.add_stream(pkts_disc_vrf2)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ rx = self.pg2.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg2, src_intf=self.pg5)
+
+ #
+ # Add VSS config
+ # table=1, vss_type=1, vpn_index=1, oui=4
+ # table=2, vss_type=0, vpn_id = "ip4-table-2"
+ self.vapi.dhcp_proxy_set_vss(tbl_id=1, vss_type=1,
+ vpn_index=1, oui=4, is_add=1)
+ self.vapi.dhcp_proxy_set_vss(tbl_id=2, vss_type=0,
+ vpn_ascii_id="ip4-table-2", is_add=1)
+
+ self.pg4.add_stream(pkts_disc_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg1,
+ src_intf=self.pg4,
+ fib_id=1, oui=4)
+
+ self.pg5.add_stream(pkts_disc_vrf2)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg2.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg2,
+ src_intf=self.pg5,
+ vpn_id="ip4-table-2")
+
+ #
+ # Add a second DHCP server in VRF 1
+        # expect client messages to be relayed to both configured servers
+ #
+ self.pg1.generate_remote_hosts(2)
+ server_addr12 = self.pg1.remote_hosts[1].ip4
+
+ Proxy12 = VppDHCPProxy(
+ self,
+ server_addr12,
+ src_addr,
+ rx_vrf_id=1,
+ server_vrf_id=1)
+ Proxy12.add_vpp_config()
+
+ #
+ # We'll need an ARP entry for the server to send it packets
+ #
+ arp_entry = VppNeighbor(self,
+ self.pg1.sw_if_index,
+ self.pg1.remote_hosts[1].mac,
+ self.pg1.remote_hosts[1].ip4)
+ arp_entry.add_vpp_config()
+
+ #
+ # Send a discover from the client. expect two relayed messages
+        # The first packet is sent to the second server
+ # We're not enforcing that here, it's just the way it is.
+ #
+ self.pg4.add_stream(pkts_disc_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(2)
+
+ option_82 = self.verify_relayed_dhcp_discover(
+ rx[0], self.pg1,
+ src_intf=self.pg4,
+ dst_mac=self.pg1.remote_hosts[1].mac,
+ dst_ip=self.pg1.remote_hosts[1].ip4,
+ fib_id=1, oui=4)
+ self.verify_relayed_dhcp_discover(rx[1], self.pg1,
+ src_intf=self.pg4,
+ fib_id=1, oui=4)
+
+ #
+ # Send both packets back. Client gets both.
+ #
+ p1 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg1.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', option_82),
+ ('end')]))
+ p2 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_hosts[1].ip4, dst=self.pg1.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', option_82),
+ ('end')]))
+ pkts = [p1, p2]
+
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg4.get_capture(2)
+
+ self.verify_dhcp_offer(rx[0], self.pg4, fib_id=1, oui=4)
+ self.verify_dhcp_offer(rx[1], self.pg4, fib_id=1, oui=4)
+
+ #
+        # Ensure offers from non-servers are dropped
+ #
+ p2 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src="8.8.8.8", dst=self.pg1.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'offer'),
+ ('relay_agent_Information', option_82),
+ ('end')]))
+ self.send_and_assert_no_replies(self.pg1, p2,
+ "DHCP offer from non-server")
+
+ #
+ # Ensure only the discover is sent to multiple servers
+ #
+ p_req_vrf1 = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg4.remote_mac) /
+ IP(src="0.0.0.0", dst="255.255.255.255") /
+ UDP(sport=DHCP4_CLIENT_PORT,
+ dport=DHCP4_SERVER_PORT) /
+ BOOTP(op=1) /
+ DHCP(options=[('message-type', 'request'),
+ ('end')]))
+
+ self.pg4.add_stream(p_req_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ #
+ # Remove the second DHCP server
+ #
+ Proxy12.remove_vpp_config()
+
+ #
+ # Test we can still relay with the first
+ #
+ self.pg4.add_stream(pkts_disc_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg1,
+ src_intf=self.pg4,
+ fib_id=1, oui=4)
+
+ #
+ # Remove the VSS config
+        #  relayed DHCP has default values in the option.
+ #
+ self.vapi.dhcp_proxy_set_vss(tbl_id=1, is_add=0)
+ self.vapi.dhcp_proxy_set_vss(tbl_id=2, is_add=0)
+
+ self.pg4.add_stream(pkts_disc_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+ rx = rx[0]
+ self.verify_relayed_dhcp_discover(rx, self.pg1, src_intf=self.pg4)
+
+ #
+ # remove DHCP config to cleanup
+ #
+ Proxy1.remove_vpp_config()
+ Proxy2.remove_vpp_config()
+
+ self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf0,
+ "DHCP cleanup VRF 0")
+ self.send_and_assert_no_replies(self.pg4, pkts_disc_vrf1,
+ "DHCP cleanup VRF 1")
+ self.send_and_assert_no_replies(self.pg5, pkts_disc_vrf2,
+ "DHCP cleanup VRF 2")
+
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+ self.pg5.unconfig_ip4()
+
+ def test_dhcp6_proxy(self):
+ """ DHCPv6 Proxy"""
+ #
+ # Verify no response to DHCP request without DHCP config
+ #
+ dhcp_solicit_dst = "ff02::1:2"
+ dhcp_solicit_src_vrf0 = mk_ll_addr(self.pg3.remote_mac)
+ dhcp_solicit_src_vrf1 = mk_ll_addr(self.pg4.remote_mac)
+ dhcp_solicit_src_vrf2 = mk_ll_addr(self.pg5.remote_mac)
+ server_addr_vrf0 = self.pg0.remote_ip6
+ src_addr_vrf0 = self.pg0.local_ip6
+ server_addr_vrf1 = self.pg1.remote_ip6
+ src_addr_vrf1 = self.pg1.local_ip6
+ server_addr_vrf2 = self.pg2.remote_ip6
+ src_addr_vrf2 = self.pg2.local_ip6
+
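+        # Ethernet multicast MAC for the All_DHCP_Relay_Agents_and_Servers
+        # group address (ff02::1:2)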
+ dmac = in6_getnsmac(inet_pton(socket.AF_INET6, dhcp_solicit_dst))
+ p_solicit_vrf0 = (Ether(dst=dmac, src=self.pg3.remote_mac) /
+ IPv6(src=dhcp_solicit_src_vrf0,
+ dst=dhcp_solicit_dst) /
+ UDP(sport=DHCP6_SERVER_PORT,
+ dport=DHCP6_CLIENT_PORT) /
+ DHCP6_Solicit())
+ p_solicit_vrf1 = (Ether(dst=dmac, src=self.pg4.remote_mac) /
+ IPv6(src=dhcp_solicit_src_vrf1,
+ dst=dhcp_solicit_dst) /
+ UDP(sport=DHCP6_SERVER_PORT,
+ dport=DHCP6_CLIENT_PORT) /
+ DHCP6_Solicit())
+ p_solicit_vrf2 = (Ether(dst=dmac, src=self.pg5.remote_mac) /
+ IPv6(src=dhcp_solicit_src_vrf2,
+ dst=dhcp_solicit_dst) /
+ UDP(sport=DHCP6_SERVER_PORT,
+ dport=DHCP6_CLIENT_PORT) /
+ DHCP6_Solicit())
+
+ self.send_and_assert_no_replies(self.pg3, p_solicit_vrf0,
+ "DHCP with no configuration")
+ self.send_and_assert_no_replies(self.pg4, p_solicit_vrf1,
+ "DHCP with no configuration")
+ self.send_and_assert_no_replies(self.pg5, p_solicit_vrf2,
+ "DHCP with no configuration")
+
+ #
+ # DHCPv6 config in VRF 0.
+ # Packets still dropped because the client facing interface has no
+ # IPv6 config
+ #
+ Proxy = VppDHCPProxy(
+ self,
+ server_addr_vrf0,
+ src_addr_vrf0,
+ rx_vrf_id=0,
+ server_vrf_id=0)
+ Proxy.add_vpp_config()
+
+ self.send_and_assert_no_replies(self.pg3, p_solicit_vrf0,
+ "DHCP with no configuration")
+ self.send_and_assert_no_replies(self.pg4, p_solicit_vrf1,
+ "DHCP with no configuration")
+
+ #
+ # configure an IP address on the client facing interface
+ #
+ self.pg3.config_ip6()
+
+ #
+ # Now the DHCP requests are relayed to the server
+ #
+ self.pg3.add_stream(p_solicit_vrf0)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg0,
+ dhcp_solicit_src_vrf0,
+ self.pg3.remote_mac)
+
+ #
+ # Exception cases for rejected relay responses
+ #
+
+ # 1 - not a relay reply
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_Advertise())
+ self.send_and_assert_no_replies(self.pg3, p_adv_vrf0,
+ "DHCP6 not a relay reply")
+
+ # 2 - no relay message option
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply() /
+ DHCP6_Advertise())
+ self.send_and_assert_no_replies(self.pg3, p_adv_vrf0,
+ "DHCP not a relay message")
+
+ # 3 - no circuit ID
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply() /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise())
+ self.send_and_assert_no_replies(self.pg3, p_adv_vrf0,
+ "DHCP6 no circuit ID")
+ # 4 - wrong circuit ID
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply() /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x05') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise())
+ self.send_and_assert_no_replies(self.pg3, p_adv_vrf0,
+ "DHCP6 wrong circuit ID")
+
+ #
+ # Send the relay response (the advertisement)
+ # - no peer address
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply() /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x04') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+ pkts_adv_vrf0 = [p_adv_vrf0]
+
+ self.pg0.add_stream(pkts_adv_vrf0)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_dhcp6_advert(rx[0], self.pg3, "::")
+
+ #
+ # Send the relay response (the advertisement)
+ # - with peer address
+ p_adv_vrf0 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(dst=self.pg0.local_ip6, src=self.pg0.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply(peeraddr=dhcp_solicit_src_vrf0) /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x04') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+ pkts_adv_vrf0 = [p_adv_vrf0]
+
+ self.pg0.add_stream(pkts_adv_vrf0)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_dhcp6_advert(rx[0], self.pg3, dhcp_solicit_src_vrf0)
+
+ #
+ # Add all the config for VRF 1 & 2
+ #
+ Proxy1 = VppDHCPProxy(
+ self,
+ server_addr_vrf1,
+ src_addr_vrf1,
+ rx_vrf_id=1,
+ server_vrf_id=1)
+ Proxy1.add_vpp_config()
+ self.pg4.config_ip6()
+
+ Proxy2 = VppDHCPProxy(
+ self,
+ server_addr_vrf2,
+ src_addr_vrf2,
+ rx_vrf_id=2,
+ server_vrf_id=2)
+ Proxy2.add_vpp_config()
+ self.pg5.config_ip6()
+
+ #
+ # VRF 1 solicit
+ #
+ self.pg4.add_stream(p_solicit_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac)
+
+ #
+ # VRF 2 solicit
+ #
+ self.pg5.add_stream(p_solicit_vrf2)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg2.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg2,
+ dhcp_solicit_src_vrf2,
+ self.pg5.remote_mac)
+
+ #
+ # VRF 1 Advert
+ #
+ p_adv_vrf1 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=self.pg1.local_ip6, src=self.pg1.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply(peeraddr=dhcp_solicit_src_vrf1) /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x05') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+ pkts_adv_vrf1 = [p_adv_vrf1]
+
+ self.pg1.add_stream(pkts_adv_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg4.get_capture(1)
+
+ self.verify_dhcp6_advert(rx[0], self.pg4, dhcp_solicit_src_vrf1)
+
+ #
+ # Add VSS config
+ #
+ self.vapi.dhcp_proxy_set_vss(
+ tbl_id=1, vss_type=1, oui=4, vpn_index=1, is_ipv6=1, is_add=1)
+ self.vapi.dhcp_proxy_set_vss(
+ tbl_id=2,
+ vss_type=0,
+ vpn_ascii_id="IPv6-table-2",
+ is_ipv6=1,
+ is_add=1)
+
+ self.pg4.add_stream(p_solicit_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac,
+ fib_id=1,
+ oui=4)
+
+ self.pg5.add_stream(p_solicit_vrf2)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg2.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg2,
+ dhcp_solicit_src_vrf2,
+ self.pg5.remote_mac,
+ vpn_id="IPv6-table-2")
+
+ #
+ # Remove the VSS config
+        #  relayed DHCP has default values in the option.
+ #
+ self.vapi.dhcp_proxy_set_vss(tbl_id=1, is_ipv6=1, is_add=0)
+
+ self.pg4.add_stream(p_solicit_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac)
+
+ #
+ # Add a second DHCP server in VRF 1
+        # expect client messages to be relayed to both configured servers
+ #
+ self.pg1.generate_remote_hosts(2)
+ server_addr12 = self.pg1.remote_hosts[1].ip6
+
+ Proxy12 = VppDHCPProxy(
+ self,
+ server_addr12,
+ src_addr_vrf1,
+ rx_vrf_id=1,
+ server_vrf_id=1)
+ Proxy12.add_vpp_config()
+
+ #
+ # We'll need an ND entry for the server to send it packets
+ #
+ nd_entry = VppNeighbor(self,
+ self.pg1.sw_if_index,
+ self.pg1.remote_hosts[1].mac,
+ self.pg1.remote_hosts[1].ip6)
+ nd_entry.add_vpp_config()
+
+ #
+ # Send a discover from the client. expect two relayed messages
+        # The first packet is sent to the second server
+ # We're not enforcing that here, it's just the way it is.
+ #
+ self.pg4.add_stream(p_solicit_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(2)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac)
+ self.verify_dhcp6_solicit(rx[1], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac,
+ dst_mac=self.pg1.remote_hosts[1].mac,
+ dst_ip=self.pg1.remote_hosts[1].ip6)
+
+ #
+ # Send both packets back. Client gets both.
+ #
+ p1 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=self.pg1.local_ip6, src=self.pg1.remote_ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply(peeraddr=dhcp_solicit_src_vrf1) /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x05') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+ p2 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_hosts[1].mac) /
+ IPv6(dst=self.pg1.local_ip6, src=self.pg1._remote_hosts[1].ip6) /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply(peeraddr=dhcp_solicit_src_vrf1) /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x05') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+
+ pkts = [p1, p2]
+
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg4.get_capture(2)
+
+ self.verify_dhcp6_advert(rx[0], self.pg4, dhcp_solicit_src_vrf1)
+ self.verify_dhcp6_advert(rx[1], self.pg4, dhcp_solicit_src_vrf1)
+
+ #
+ # Ensure only solicit messages are duplicated
+ #
+ p_request_vrf1 = (Ether(dst=dmac, src=self.pg4.remote_mac) /
+ IPv6(src=dhcp_solicit_src_vrf1,
+ dst=dhcp_solicit_dst) /
+ UDP(sport=DHCP6_SERVER_PORT,
+ dport=DHCP6_CLIENT_PORT) /
+ DHCP6_Request())
+
+ self.pg4.add_stream(p_request_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ #
+ # Test we drop DHCP packets from addresses that are not configured as
+ # DHCP servers
+ #
+ p2 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_hosts[1].mac) /
+ IPv6(dst=self.pg1.local_ip6, src="3001::1") /
+ UDP(sport=DHCP6_SERVER_PORT, dport=DHCP6_SERVER_PORT) /
+ DHCP6_RelayReply(peeraddr=dhcp_solicit_src_vrf1) /
+ DHCP6OptIfaceId(optlen=4, ifaceid='\x00\x00\x00\x05') /
+ DHCP6OptRelayMsg(optlen=0) /
+ DHCP6_Advertise(trid=1) /
+ DHCP6OptStatusCode(statuscode=0))
+ self.send_and_assert_no_replies(self.pg1, p2,
+ "DHCP6 not from server")
+
+ #
+ # Remove the second DHCP server
+ #
+ Proxy12.remove_vpp_config()
+
+ #
+ # Test we can still relay with the first
+ #
+ self.pg4.add_stream(p_solicit_vrf1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+
+ self.verify_dhcp6_solicit(rx[0], self.pg1,
+ dhcp_solicit_src_vrf1,
+ self.pg4.remote_mac)
+
+ #
+ # Cleanup
+ #
+ Proxy.remove_vpp_config()
+ Proxy1.remove_vpp_config()
+ Proxy2.remove_vpp_config()
+
+ self.pg3.unconfig_ip6()
+ self.pg4.unconfig_ip6()
+ self.pg5.unconfig_ip6()
+
+ def test_dhcp_client(self):
+ """ DHCP Client"""
+
+ vdscp = VppEnum.vl_api_ip_dscp_t
+ hostname = 'universal-dp'
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ #
+ # Configure DHCP client on PG3 and capture the discover sent
+ #
+ Client = VppDHCPClient(self, self.pg3.sw_if_index, hostname)
+ Client.add_vpp_config()
+ self.assertTrue(Client.query_vpp_config())
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_orig_dhcp_discover(rx[0], self.pg3, hostname)
+
+ #
+        # Send back an offer, expect the request
+ #
+ p_offer = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst="255.255.255.255") /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1,
+ yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'offer'),
+ ('server_id', self.pg3.remote_ip4),
+ 'end']))
+
+ self.pg3.add_stream(p_offer)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+ self.verify_orig_dhcp_request(rx[0], self.pg3, hostname,
+ self.pg3.local_ip4)
+
+ #
+ # Send an acknowledgment
+ #
+ p_ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst="255.255.255.255") /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'ack'),
+ ('subnet_mask', "255.255.255.0"),
+ ('router', self.pg3.remote_ip4),
+ ('server_id', self.pg3.remote_ip4),
+ ('lease_time', 43200),
+ 'end']))
+
+ self.pg3.add_stream(p_ack)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # We'll get an ARP request for the router address
+ #
+ rx = self.pg3.get_capture(1)
+
+ self.assertEqual(rx[0][ARP].pdst, self.pg3.remote_ip4)
+ self.pg_enable_capture(self.pg_interfaces)
+
+ #
+ # At the end of this procedure there should be a connected route
+ # in the FIB
+ #
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 24))
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 32))
+
+ #
+ # remove the DHCP config
+ #
+ Client.remove_vpp_config()
+
+ #
+ # and now the route should be gone
+ #
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 32))
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 24))
+
+ #
+        # Start the procedure again. This time have VPP send the client-ID
+ # and set the DSCP value
+ #
+ self.pg3.admin_down()
+ self.sleep(1)
+ self.pg3.admin_up()
+ Client.set_client(self.pg3.sw_if_index, hostname,
+ id=self.pg3.local_mac,
+ dscp=vdscp.IP_API_DSCP_EF)
+ Client.add_vpp_config()
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_orig_dhcp_discover(rx[0], self.pg3, hostname,
+ self.pg3.local_mac,
+ dscp=vdscp.IP_API_DSCP_EF)
+
+ # TODO: VPP DHCP client should not accept DHCP OFFER message with
+ # the XID (Transaction ID) not matching the XID of the most recent
+ # DHCP DISCOVERY message.
+ # Such DHCP OFFER message must be silently discarded - RFC2131.
+ # Reported in Jira ticket: VPP-99
+ self.pg3.add_stream(p_offer)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+ self.verify_orig_dhcp_request(rx[0], self.pg3, hostname,
+ self.pg3.local_ip4,
+ dscp=vdscp.IP_API_DSCP_EF)
+
+ #
+ # unicast the ack to the offered address
+ #
+ p_ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'ack'),
+ ('subnet_mask', "255.255.255.0"),
+ ('router', self.pg3.remote_ip4),
+ ('server_id', self.pg3.remote_ip4),
+ ('lease_time', 43200),
+ 'end']))
+
+ self.pg3.add_stream(p_ack)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # We'll get an ARP request for the router address
+ #
+ rx = self.pg3.get_capture(1)
+
+ self.assertEqual(rx[0][ARP].pdst, self.pg3.remote_ip4)
+ self.pg_enable_capture(self.pg_interfaces)
+
+ #
+ # At the end of this procedure there should be a connected route
+ # in the FIB
+ #
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 32))
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 24))
+
+ #
+ # remove the DHCP config
+ #
+ Client.remove_vpp_config()
+
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 32))
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 24))
+
+ #
+        # Rinse and repeat, this time with VPP configured not to set
+        # the broadcast flag in the discover and request messages,
+ # and for the server to unicast the responses.
+ #
+ # Configure DHCP client on PG3 and capture the discover sent
+ #
+ Client.set_client(
+ self.pg3.sw_if_index,
+ hostname,
+ set_broadcast_flag=False)
+ Client.add_vpp_config()
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_orig_dhcp_discover(rx[0], self.pg3, hostname,
+ broadcast=False)
+
+ #
+        # Send back an offer, unicast to the offered address.
+ # Expect the request.
+ #
+ p_offer = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'offer'),
+ ('server_id', self.pg3.remote_ip4),
+ 'end']))
+
+ self.pg3.add_stream(p_offer)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+ self.verify_orig_dhcp_request(rx[0], self.pg3, hostname,
+ self.pg3.local_ip4,
+ broadcast=False)
+
+ #
+ # Send an acknowledgment, the lease renewal time is 2 seconds
+ # so we should expect the renew straight after
+ #
+ p_ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'ack'),
+ ('subnet_mask', "255.255.255.0"),
+ ('router', self.pg3.remote_ip4),
+ ('server_id', self.pg3.remote_ip4),
+ ('lease_time', 43200),
+ ('renewal_time', 2),
+ 'end']))
+
+ self.pg3.add_stream(p_ack)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # We'll get an ARP request for the router address
+ #
+ rx = self.pg3.get_capture(1)
+
+ self.assertEqual(rx[0][ARP].pdst, self.pg3.remote_ip4)
+ self.pg_enable_capture(self.pg_interfaces)
+
+ #
+ # At the end of this procedure there should be a connected route
+ # in the FIB
+ #
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 24))
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 32))
+
+ #
+ # read the DHCP client details from a dump
+ #
+ clients = self.vapi.dhcp_client_dump()
+
+ self.assertEqual(clients[0].client.sw_if_index,
+ self.pg3.sw_if_index)
+ self.assertEqual(clients[0].lease.sw_if_index,
+ self.pg3.sw_if_index)
+ self.assertEqual(clients[0].client.hostname, hostname)
+ self.assertEqual(clients[0].lease.hostname, hostname)
+ # 0 = DISCOVER, 1 = REQUEST, 2 = BOUND
+ self.assertEqual(clients[0].lease.state, 2)
+ self.assertEqual(clients[0].lease.mask_width, 24)
+ self.assertEqual(str(clients[0].lease.router_address),
+ self.pg3.remote_ip4)
+ self.assertEqual(str(clients[0].lease.host_address),
+ self.pg3.local_ip4)
+
+ #
+ # wait for the unicasted renewal
+ # the first attempt will be an ARP packet, since we have not yet
+ # responded to VPP's request
+ #
+ self.logger.info(self.vapi.cli("sh dhcp client intfc pg3 verbose"))
+ rx = self.pg3.get_capture(1, timeout=10)
+
+ self.assertEqual(rx[0][ARP].pdst, self.pg3.remote_ip4)
+
+ # respond to the arp
+ p_arp = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ ARP(op="is-at",
+ hwdst=self.pg3.local_mac,
+ hwsrc=self.pg3.remote_mac,
+ pdst=self.pg3.local_ip4,
+ psrc=self.pg3.remote_ip4))
+ self.pg3.add_stream(p_arp)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # the next packet is the unicasted renewal
+ rx = self.pg3.get_capture(1, timeout=10)
+ self.verify_orig_dhcp_request(rx[0], self.pg3, hostname,
+ self.pg3.local_ip4,
+ l2_bc=False,
+ broadcast=False)
+
+        # send an ACK with different data from the original offer
+ self.pg3.generate_remote_hosts(4)
+ p_ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.remote_hosts[3].ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'ack'),
+ ('subnet_mask', "255.255.255.0"),
+ ('router', self.pg3.remote_hosts[1].ip4),
+ ('server_id', self.pg3.remote_hosts[2].ip4),
+ ('lease_time', 43200),
+ ('renewal_time', 2),
+ 'end']))
+
+ self.pg3.add_stream(p_ack)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # read the DHCP client details from a dump
+ #
+ clients = self.vapi.dhcp_client_dump()
+
+ self.assertEqual(clients[0].client.sw_if_index,
+ self.pg3.sw_if_index)
+ self.assertEqual(clients[0].lease.sw_if_index,
+ self.pg3.sw_if_index)
+ self.assertEqual(clients[0].client.hostname, hostname)
+ self.assertEqual(clients[0].lease.hostname, hostname)
+ # 0 = DISCOVER, 1 = REQUEST, 2 = BOUND
+ self.assertEqual(clients[0].lease.state, 2)
+ self.assertEqual(clients[0].lease.mask_width, 24)
+ self.assertEqual(str(clients[0].lease.router_address),
+ self.pg3.remote_hosts[1].ip4)
+ self.assertEqual(str(clients[0].lease.host_address),
+ self.pg3.remote_hosts[3].ip4)
+
+ #
+ # remove the DHCP config
+ #
+ Client.remove_vpp_config()
+
+ #
+ # and now the route should be gone
+ #
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 32))
+ self.assertFalse(find_route(self, self.pg3.local_ip4, 24))
+
+ #
+ # Start the procedure again. Use requested lease time option.
+        # This time wait for the lease to expire and the client to
+ # self-destruct
+ #
+ hostname += "-2"
+ self.pg3.admin_down()
+ self.sleep(1)
+ self.pg3.admin_up()
+ self.pg_enable_capture(self.pg_interfaces)
+ Client.set_client(self.pg3.sw_if_index, hostname)
+ Client.add_vpp_config()
+
+ rx = self.pg3.get_capture(1)
+
+ self.verify_orig_dhcp_discover(rx[0], self.pg3, hostname)
+
+ #
+        # Send back an offer with the requested lease time, expect the request
+ #
+ lease_time = 1
+ p_offer = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst='255.255.255.255') /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1,
+ yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'offer'),
+ ('server_id', self.pg3.remote_ip4),
+ ('lease_time', lease_time),
+ 'end']))
+
+ self.pg3.add_stream(p_offer)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg3.get_capture(1)
+ self.verify_orig_dhcp_request(rx[0], self.pg3, hostname,
+ self.pg3.local_ip4)
+
+ #
+ # Send an acknowledgment
+ #
+ p_ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst='255.255.255.255') /
+ UDP(sport=DHCP4_SERVER_PORT, dport=DHCP4_CLIENT_PORT) /
+ BOOTP(op=1, yiaddr=self.pg3.local_ip4,
+ chaddr=mac_pton(self.pg3.local_mac)) /
+ DHCP(options=[('message-type', 'ack'),
+ ('subnet_mask', '255.255.255.0'),
+ ('router', self.pg3.remote_ip4),
+ ('server_id', self.pg3.remote_ip4),
+ ('lease_time', lease_time),
+ 'end']))
+
+ self.pg3.add_stream(p_ack)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # We'll get an ARP request for the router address
+ #
+ rx = self.pg3.get_capture(1)
+
+ self.assertEqual(rx[0][ARP].pdst, self.pg3.remote_ip4)
+
+ #
+ # At the end of this procedure there should be a connected route
+ # in the FIB
+ #
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 32))
+ self.assertTrue(find_route(self, self.pg3.local_ip4, 24))
+
+ #
+ # the route should be gone after the lease expires
+ #
+ self.assertTrue(self.wait_for_no_route(self.pg3.local_ip4, 32))
+ self.assertTrue(self.wait_for_no_route(self.pg3.local_ip4, 24))
+
+ #
+ # remove the DHCP config
+ #
+ Client.remove_vpp_config()
+
+ def test_dhcp_client_vlan(self):
+ """ DHCP Client w/ VLAN"""
+
+ vdscp = VppEnum.vl_api_ip_dscp_t
+ vqos = VppEnum.vl_api_qos_source_t
+ hostname = 'universal-dp'
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ vlan_100 = VppDot1QSubint(self, self.pg3, 100)
+ vlan_100.admin_up()
+
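+        # build a QoS egress map in which every input value maps to output 4;
+        # the VppQosMark below applies it to the VLAN sub-interface, and the
+        # resulting 802.1p priority is checked later in this test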
+ output = [scapy.compat.chb(4)] * 256
+        output_bytes = b''.join(output)
+        rows = [{'outputs': output_bytes},
+                {'outputs': output_bytes},
+                {'outputs': output_bytes},
+                {'outputs': output_bytes}]
+
+ qem1 = VppQosEgressMap(self, 1, rows).add_vpp_config()
+ qm1 = VppQosMark(self, vlan_100, qem1,
+ vqos.QOS_API_SOURCE_VLAN).add_vpp_config()
+
+ #
+ # Configure DHCP client on PG3 and capture the discover sent
+ #
+ Client = VppDHCPClient(
+ self,
+ vlan_100.sw_if_index,
+ hostname,
+ dscp=vdscp.IP_API_DSCP_EF)
+ Client.add_vpp_config()
+
+ rx = self.pg3.get_capture(1)
+
+ self.assertEqual(rx[0][Dot1Q].vlan, 100)
+ self.assertEqual(rx[0][Dot1Q].prio, 2)
+
+ self.verify_orig_dhcp_discover(rx[0], self.pg3, hostname,
+ dscp=vdscp.IP_API_DSCP_EF)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_dhcp6.py b/test/test_dhcp6.py
new file mode 100644
index 00000000000..57eb113fb13
--- /dev/null
+++ b/test/test_dhcp6.py
@@ -0,0 +1,805 @@
+from socket import AF_INET6, inet_ntop, inet_pton
+
+from scapy.layers.dhcp6 import DHCP6_Advertise, DHCP6OptClientId, \
+ DHCP6OptStatusCode, DHCP6OptPref, DHCP6OptIA_PD, DHCP6OptIAPrefix, \
+ DHCP6OptServerId, DHCP6_Solicit, DHCP6_Reply, DHCP6_Request, DHCP6_Renew, \
+ DHCP6_Rebind, DUID_LL, DHCP6_Release, DHCP6OptElapsedTime, DHCP6OptIA_NA, \
+ DHCP6OptIAAddress
+from scapy.layers.inet6 import IPv6, Ether, UDP
+from scapy.utils6 import in6_mactoifaceid
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase
+from framework import tag_run_solo
+from vpp_papi import VppEnum
+import util
+import os
+
+
+def ip6_normalize(ip6):
+ return inet_ntop(AF_INET6, inet_pton(AF_INET6, ip6))
+
+
+class TestDHCPv6DataPlane(VppTestCase):
+ """ DHCPv6 Data Plane Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDHCPv6DataPlane, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDHCPv6DataPlane, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDHCPv6DataPlane, self).setUp()
+
+ self.create_pg_interfaces(range(1))
+ self.interfaces = list(self.pg_interfaces)
+ for i in self.interfaces:
+ i.admin_up()
+ i.config_ip6()
+
+ self.server_duid = DUID_LL(lladdr=self.pg0.remote_mac)
+
+ def tearDown(self):
+ for i in self.interfaces:
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestDHCPv6DataPlane, self).tearDown()
+
+ def test_dhcp_ia_na_send_solicit_receive_advertise(self):
+ """ Verify DHCPv6 IA NA Solicit packet and Advertise event """
+
+ self.vapi.dhcp6_clients_enable_disable(enable=1)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
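+        # the API call below should make VPP emit a Solicit carrying an
+        # IA_NA option built from the address hint; the captured packet is
+        # checked for those fields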
+ address = {'address': '1:2:3::5',
+ 'preferred_time': 60,
+ 'valid_time': 120}
+ self.vapi.dhcp6_send_client_message(
+ server_index=0xffffffff,
+ mrc=1,
+ msg_type=VppEnum.vl_api_dhcpv6_msg_type_t.DHCPV6_MSG_API_SOLICIT,
+ sw_if_index=self.pg0.sw_if_index,
+ T1=20,
+ T2=40,
+ addresses=[address],
+            n_addresses=len([address]))
+ rx_list = self.pg0.get_capture(1)
+ self.assertEqual(len(rx_list), 1)
+ packet = rx_list[0]
+
+ self.assertEqual(packet.haslayer(IPv6), 1)
+ self.assertEqual(packet[IPv6].haslayer(DHCP6_Solicit), 1)
+
+ client_duid = packet[DHCP6OptClientId].duid
+ trid = packet[DHCP6_Solicit].trid
+
+ dst = ip6_normalize(packet[IPv6].dst)
+ dst2 = ip6_normalize("ff02::1:2")
+ self.assert_equal(dst, dst2)
+ src = ip6_normalize(packet[IPv6].src)
+ src2 = ip6_normalize(self.pg0.local_ip6_ll)
+ self.assert_equal(src, src2)
+ ia_na = packet[DHCP6OptIA_NA]
+ self.assert_equal(ia_na.T1, 20)
+ self.assert_equal(ia_na.T2, 40)
+ self.assert_equal(len(ia_na.ianaopts), 1)
+ address = ia_na.ianaopts[0]
+ self.assert_equal(address.addr, '1:2:3::5')
+ self.assert_equal(address.preflft, 60)
+ self.assert_equal(address.validlft, 120)
+
+ self.vapi.want_dhcp6_reply_events(enable_disable=1,
+ pid=os.getpid())
+
+ try:
+ ia_na_opts = DHCP6OptIAAddress(addr='7:8::2', preflft=60,
+ validlft=120)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=util.mk_ll_addr(self.pg0.remote_mac),
+ dst=self.pg0.local_ip6_ll) /
+ UDP(sport=547, dport=546) /
+ DHCP6_Advertise(trid=trid) /
+ DHCP6OptServerId(duid=self.server_duid) /
+ DHCP6OptClientId(duid=client_duid) /
+ DHCP6OptPref(prefval=7) /
+ DHCP6OptStatusCode(statuscode=1) /
+ DHCP6OptIA_NA(iaid=1, T1=20, T2=40, ianaopts=ia_na_opts)
+ )
+ self.pg0.add_stream([p])
+ self.pg_start()
+
+ ev = self.vapi.wait_for_event(1, "dhcp6_reply_event")
+
+ self.assert_equal(ev.preference, 7)
+ self.assert_equal(ev.status_code, 1)
+ self.assert_equal(ev.T1, 20)
+ self.assert_equal(ev.T2, 40)
+
+ reported_address = ev.addresses[0]
+ address = ia_na_opts.getfieldval("addr")
+ self.assert_equal(str(reported_address.address), address)
+ self.assert_equal(reported_address.preferred_time,
+ ia_na_opts.getfieldval("preflft"))
+ self.assert_equal(reported_address.valid_time,
+ ia_na_opts.getfieldval("validlft"))
+
+ finally:
+ self.vapi.want_dhcp6_reply_events(enable_disable=0)
+ self.vapi.dhcp6_clients_enable_disable(enable=0)
+
+ def test_dhcp_pd_send_solicit_receive_advertise(self):
+ """ Verify DHCPv6 PD Solicit packet and Advertise event """
+
+ self.vapi.dhcp6_clients_enable_disable(enable=1)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ prefix = {'prefix': {'address': '1:2:3::', 'len': 50},
+ 'preferred_time': 60,
+ 'valid_time': 120}
+ prefixes = [prefix]
+ self.vapi.dhcp6_pd_send_client_message(
+ server_index=0xffffffff,
+ mrc=1,
+ msg_type=VppEnum.vl_api_dhcpv6_msg_type_t.DHCPV6_MSG_API_SOLICIT,
+ sw_if_index=self.pg0.sw_if_index,
+ T1=20,
+ T2=40,
+ prefixes=prefixes,
+ n_prefixes=len(prefixes))
+ rx_list = self.pg0.get_capture(1)
+ self.assertEqual(len(rx_list), 1)
+ packet = rx_list[0]
+
+ self.assertEqual(packet.haslayer(IPv6), 1)
+ self.assertEqual(packet[IPv6].haslayer(DHCP6_Solicit), 1)
+
+ client_duid = packet[DHCP6OptClientId].duid
+ trid = packet[DHCP6_Solicit].trid
+
+ dst = ip6_normalize(packet[IPv6].dst)
+ dst2 = ip6_normalize("ff02::1:2")
+ self.assert_equal(dst, dst2)
+ src = ip6_normalize(packet[IPv6].src)
+ src2 = ip6_normalize(self.pg0.local_ip6_ll)
+ self.assert_equal(src, src2)
+ ia_pd = packet[DHCP6OptIA_PD]
+ self.assert_equal(ia_pd.T1, 20)
+ self.assert_equal(ia_pd.T2, 40)
+ self.assert_equal(len(ia_pd.iapdopt), 1)
+ prefix = ia_pd.iapdopt[0]
+ self.assert_equal(prefix.prefix, '1:2:3::')
+ self.assert_equal(prefix.plen, 50)
+ self.assert_equal(prefix.preflft, 60)
+ self.assert_equal(prefix.validlft, 120)
+
+ self.vapi.want_dhcp6_pd_reply_events(enable_disable=1,
+ pid=os.getpid())
+
+ try:
+ ia_pd_opts = DHCP6OptIAPrefix(prefix='7:8::', plen=56, preflft=60,
+ validlft=120)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=util.mk_ll_addr(self.pg0.remote_mac),
+ dst=self.pg0.local_ip6_ll) /
+ UDP(sport=547, dport=546) /
+ DHCP6_Advertise(trid=trid) /
+ DHCP6OptServerId(duid=self.server_duid) /
+ DHCP6OptClientId(duid=client_duid) /
+ DHCP6OptPref(prefval=7) /
+ DHCP6OptStatusCode(statuscode=1) /
+ DHCP6OptIA_PD(iaid=1, T1=20, T2=40, iapdopt=ia_pd_opts)
+ )
+ self.pg0.add_stream([p])
+ self.pg_start()
+
+ ev = self.vapi.wait_for_event(1, "dhcp6_pd_reply_event")
+
+ self.assert_equal(ev.preference, 7)
+ self.assert_equal(ev.status_code, 1)
+ self.assert_equal(ev.T1, 20)
+ self.assert_equal(ev.T2, 40)
+
+ reported_prefix = ev.prefixes[0]
+ prefix = ia_pd_opts.getfieldval("prefix")
+ self.assert_equal(
+ str(reported_prefix.prefix).split('/')[0], prefix)
+ self.assert_equal(int(str(reported_prefix.prefix).split('/')[1]),
+ ia_pd_opts.getfieldval("plen"))
+ self.assert_equal(reported_prefix.preferred_time,
+ ia_pd_opts.getfieldval("preflft"))
+ self.assert_equal(reported_prefix.valid_time,
+ ia_pd_opts.getfieldval("validlft"))
+
+ finally:
+ self.vapi.want_dhcp6_pd_reply_events(enable_disable=0)
+ self.vapi.dhcp6_clients_enable_disable(enable=0)
+
+
+@tag_run_solo
+class TestDHCPv6IANAControlPlane(VppTestCase):
+ """ DHCPv6 IA NA Control Plane Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDHCPv6IANAControlPlane, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDHCPv6IANAControlPlane, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDHCPv6IANAControlPlane, self).setUp()
+
+ self.create_pg_interfaces(range(1))
+ self.interfaces = list(self.pg_interfaces)
+ for i in self.interfaces:
+ i.admin_up()
+
+ self.server_duid = DUID_LL(lladdr=self.pg0.remote_mac)
+ self.client_duid = None
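+        # short T1/T2 (in seconds) so that renew/rebind can be observed
+        # within the per-test sleeps below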
+ self.T1 = 1
+ self.T2 = 2
+
+ fib = self.vapi.ip_route_dump(0, True)
+ self.initial_addresses = set(self.get_interface_addresses(fib,
+ self.pg0))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.vapi.dhcp6_client_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable=1)
+
+ def tearDown(self):
+ self.vapi.dhcp6_client_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable=0)
+
+ for i in self.interfaces:
+ i.admin_down()
+
+ super(TestDHCPv6IANAControlPlane, self).tearDown()
+
+ @staticmethod
+ def get_interface_addresses(fib, pg):
+ lst = []
+ for entry in fib:
+ if entry.route.prefix.prefixlen == 128:
+ path = entry.route.paths[0]
+ if path.sw_if_index == pg.sw_if_index:
+ lst.append(str(entry.route.prefix.network_address))
+ return lst
+
+ def get_addresses(self):
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg0))
+ return addresses.difference(self.initial_addresses)
+
+ def validate_duid_ll(self, duid):
+ DUID_LL(duid)
+
+ def validate_packet(self, packet, msg_type, is_resend=False):
+ try:
+ self.assertEqual(packet.haslayer(msg_type), 1)
+ client_duid = packet[DHCP6OptClientId].duid
+ if self.client_duid is None:
+ self.client_duid = client_duid
+ self.validate_duid_ll(client_duid)
+ else:
+ self.assertEqual(self.client_duid, client_duid)
+ if msg_type != DHCP6_Solicit and msg_type != DHCP6_Rebind:
+ server_duid = packet[DHCP6OptServerId].duid
+ self.assertEqual(server_duid, self.server_duid)
+ if is_resend:
+ self.assertEqual(self.trid, packet[msg_type].trid)
+ else:
+ self.trid = packet[msg_type].trid
+ ip = packet[IPv6]
+ udp = packet[UDP]
+ self.assertEqual(ip.dst, 'ff02::1:2')
+ self.assertEqual(udp.sport, 546)
+ self.assertEqual(udp.dport, 547)
+ dhcpv6 = packet[msg_type]
+ elapsed_time = dhcpv6[DHCP6OptElapsedTime]
+ if (is_resend):
+ self.assertNotEqual(elapsed_time.elapsedtime, 0)
+ else:
+ self.assertEqual(elapsed_time.elapsedtime, 0)
+ except BaseException:
+ packet.show()
+ raise
+
+ def wait_for_packet(self, msg_type, timeout=None, is_resend=False):
+ if timeout is None:
+ timeout = 3
+ rx_list = self.pg0.get_capture(1, timeout=timeout)
+ packet = rx_list[0]
+ self.validate_packet(packet, msg_type, is_resend=is_resend)
+
+ def wait_for_solicit(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Solicit, timeout, is_resend=is_resend)
+
+ def wait_for_request(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Request, timeout, is_resend=is_resend)
+
+ def wait_for_renew(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Renew, timeout, is_resend=is_resend)
+
+ def wait_for_rebind(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Rebind, timeout, is_resend=is_resend)
+
+ def wait_for_release(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Release, timeout, is_resend=is_resend)
+
+ def send_packet(self, msg_type, t1=None, t2=None, ianaopts=None):
+ if t1 is None:
+ t1 = self.T1
+ if t2 is None:
+ t2 = self.T2
+ if ianaopts is None:
+ opt_ia_na = DHCP6OptIA_NA(iaid=1, T1=t1, T2=t2)
+ else:
+ opt_ia_na = DHCP6OptIA_NA(iaid=1, T1=t1, T2=t2, ianaopts=ianaopts)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=util.mk_ll_addr(self.pg0.remote_mac),
+ dst=self.pg0.local_ip6_ll) /
+ UDP(sport=547, dport=546) /
+ msg_type(trid=self.trid) /
+ DHCP6OptServerId(duid=self.server_duid) /
+ DHCP6OptClientId(duid=self.client_duid) /
+ opt_ia_na
+ )
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ def send_advertise(self, t1=None, t2=None, ianaopts=None):
+ self.send_packet(DHCP6_Advertise, t1, t2, ianaopts)
+
+ def send_reply(self, t1=None, t2=None, ianaopts=None):
+ self.send_packet(DHCP6_Reply, t1, t2, ianaopts)
+
+ def test_T1_and_T2_timeouts(self):
+ """ Test T1 and T2 timeouts """
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ self.send_reply()
+
+ self.sleep(1)
+
+ self.wait_for_renew()
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ self.sleep(1)
+
+ self.wait_for_rebind()
+
+ def test_addresses(self):
+ """ Test handling of addresses """
+
+ ia_na_opts = DHCP6OptIAAddress(addr='7:8::2', preflft=1,
+ validlft=2)
+
+ self.wait_for_solicit()
+ self.send_advertise(t1=20, t2=40, ianaopts=ia_na_opts)
+ self.wait_for_request()
+ self.send_reply(t1=20, t2=40, ianaopts=ia_na_opts)
+ self.sleep(0.1)
+
+ # check FIB for new address
+ new_addresses = self.get_addresses()
+ self.assertEqual(len(new_addresses), 1)
+ addr = list(new_addresses)[0]
+ self.assertEqual(addr, '7:8::2')
+
+ self.sleep(2)
+
+ # check that the address is deleted
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg0))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+ def test_sending_client_messages_solicit(self):
+ """ VPP receives messages from DHCPv6 client """
+
+ self.wait_for_solicit()
+ self.send_packet(DHCP6_Solicit)
+ self.send_packet(DHCP6_Request)
+ self.send_packet(DHCP6_Renew)
+ self.send_packet(DHCP6_Rebind)
+ self.sleep(1)
+ self.wait_for_solicit(is_resend=True)
+
+ def test_sending_inappropriate_packets(self):
+ """ Server sends messages with inappropriate message types """
+
+ self.wait_for_solicit()
+ self.send_reply()
+ self.wait_for_solicit(is_resend=True)
+ self.send_advertise()
+ self.wait_for_request()
+ self.send_advertise()
+ self.wait_for_request(is_resend=True)
+ self.send_reply()
+ self.wait_for_renew()
+
+ def test_no_address_available_in_advertise(self):
+ """ Advertise message contains NoAddrsAvail status code """
+
+ self.wait_for_solicit()
+ noavail = DHCP6OptStatusCode(statuscode=2) # NoAddrsAvail
+ self.send_advertise(ianaopts=noavail)
+ self.wait_for_solicit(is_resend=True)
+
+ def test_preferred_greater_than_valid_lifetime(self):
+ """ Preferred lifetime is greater than valid lifetime """
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ ia_na_opts = DHCP6OptIAAddress(addr='7:8::2', preflft=4, validlft=3)
+ self.send_reply(ianaopts=ia_na_opts)
+
+ self.sleep(0.5)
+
+ # check FIB contains no addresses
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg0))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+ def test_T1_greater_than_T2(self):
+ """ T1 is greater than T2 """
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ ia_na_opts = DHCP6OptIAAddress(addr='7:8::2', preflft=4, validlft=8)
+ self.send_reply(t1=80, t2=40, ianaopts=ia_na_opts)
+
+ self.sleep(0.5)
+
+ # check FIB contains no addresses
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg0))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+
+@tag_fixme_vpp_workers
+class TestDHCPv6PDControlPlane(VppTestCase):
+ """ DHCPv6 PD Control Plane Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDHCPv6PDControlPlane, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDHCPv6PDControlPlane, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDHCPv6PDControlPlane, self).setUp()
+
+ self.create_pg_interfaces(range(2))
+ self.interfaces = list(self.pg_interfaces)
+ for i in self.interfaces:
+ i.admin_up()
+
+ self.server_duid = DUID_LL(lladdr=self.pg0.remote_mac)
+ self.client_duid = None
+ self.T1 = 1
+ self.T2 = 2
+
+ fib = self.vapi.ip_route_dump(0, True)
+ self.initial_addresses = set(self.get_interface_addresses(fib,
+ self.pg1))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
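+        # addresses configured via ip6_add_del_address_using_prefix with this
+        # group name should be completed from prefixes the PD client learns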
+ self.prefix_group = 'my-pd-prefix-group'
+
+ self.vapi.dhcp6_pd_client_enable_disable(
+ enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ prefix_group=self.prefix_group)
+
+ def tearDown(self):
+ self.vapi.dhcp6_pd_client_enable_disable(self.pg0.sw_if_index,
+ enable=0)
+
+ for i in self.interfaces:
+ i.admin_down()
+
+ super(TestDHCPv6PDControlPlane, self).tearDown()
+
+ @staticmethod
+ def get_interface_addresses(fib, pg):
+ lst = []
+ for entry in fib:
+ if entry.route.prefix.prefixlen == 128:
+ path = entry.route.paths[0]
+ if path.sw_if_index == pg.sw_if_index:
+ lst.append(str(entry.route.prefix.network_address))
+ return lst
+
+ def get_addresses(self):
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg1))
+ return addresses.difference(self.initial_addresses)
+
+ def validate_duid_ll(self, duid):
+ DUID_LL(duid)
+
+ def validate_packet(self, packet, msg_type, is_resend=False):
+ try:
+ self.assertEqual(packet.haslayer(msg_type), 1)
+ client_duid = packet[DHCP6OptClientId].duid
+ if self.client_duid is None:
+ self.client_duid = client_duid
+ self.validate_duid_ll(client_duid)
+ else:
+ self.assertEqual(self.client_duid, client_duid)
+ if msg_type != DHCP6_Solicit and msg_type != DHCP6_Rebind:
+ server_duid = packet[DHCP6OptServerId].duid
+ self.assertEqual(server_duid, self.server_duid)
+ if is_resend:
+ self.assertEqual(self.trid, packet[msg_type].trid)
+ else:
+ self.trid = packet[msg_type].trid
+ ip = packet[IPv6]
+ udp = packet[UDP]
+ self.assertEqual(ip.dst, 'ff02::1:2')
+ self.assertEqual(udp.sport, 546)
+ self.assertEqual(udp.dport, 547)
+ dhcpv6 = packet[msg_type]
+ elapsed_time = dhcpv6[DHCP6OptElapsedTime]
+ if (is_resend):
+ self.assertNotEqual(elapsed_time.elapsedtime, 0)
+ else:
+ self.assertEqual(elapsed_time.elapsedtime, 0)
+ except BaseException:
+ packet.show()
+ raise
+
+ def wait_for_packet(self, msg_type, timeout=None, is_resend=False):
+ if timeout is None:
+ timeout = 3
+ rx_list = self.pg0.get_capture(1, timeout=timeout)
+ packet = rx_list[0]
+ self.validate_packet(packet, msg_type, is_resend=is_resend)
+
+ def wait_for_solicit(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Solicit, timeout, is_resend=is_resend)
+
+ def wait_for_request(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Request, timeout, is_resend=is_resend)
+
+ def wait_for_renew(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Renew, timeout, is_resend=is_resend)
+
+ def wait_for_rebind(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Rebind, timeout, is_resend=is_resend)
+
+ def wait_for_release(self, timeout=None, is_resend=False):
+ self.wait_for_packet(DHCP6_Release, timeout, is_resend=is_resend)
+
+ def send_packet(self, msg_type, t1=None, t2=None, iapdopt=None):
+ if t1 is None:
+ t1 = self.T1
+ if t2 is None:
+ t2 = self.T2
+ if iapdopt is None:
+ opt_ia_pd = DHCP6OptIA_PD(iaid=1, T1=t1, T2=t2)
+ else:
+ opt_ia_pd = DHCP6OptIA_PD(iaid=1, T1=t1, T2=t2, iapdopt=iapdopt)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=util.mk_ll_addr(self.pg0.remote_mac),
+ dst=self.pg0.local_ip6_ll) /
+ UDP(sport=547, dport=546) /
+ msg_type(trid=self.trid) /
+ DHCP6OptServerId(duid=self.server_duid) /
+ DHCP6OptClientId(duid=self.client_duid) /
+ opt_ia_pd
+ )
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ def send_advertise(self, t1=None, t2=None, iapdopt=None):
+ self.send_packet(DHCP6_Advertise, t1, t2, iapdopt)
+
+ def send_reply(self, t1=None, t2=None, iapdopt=None):
+ self.send_packet(DHCP6_Reply, t1, t2, iapdopt)
+
+ def test_T1_and_T2_timeouts(self):
+ """ Test T1 and T2 timeouts """
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ self.send_reply()
+
+ self.sleep(1)
+
+ self.wait_for_renew()
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ self.sleep(1)
+
+ self.wait_for_rebind()
+
+ def test_prefixes(self):
+ """ Test handling of prefixes """
+
+ address1 = '::2:0:0:0:405/60'
+ address2 = '::76:0:0:0:406/62'
+ try:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address1,
+ prefix_group=self.prefix_group)
+
+ ia_pd_opts = DHCP6OptIAPrefix(prefix='7:8::', plen=56, preflft=2,
+ validlft=3)
+
+ self.wait_for_solicit()
+ self.send_advertise(t1=20, t2=40, iapdopt=ia_pd_opts)
+ self.wait_for_request()
+ self.send_reply(t1=20, t2=40, iapdopt=ia_pd_opts)
+ self.sleep(0.1)
+
+ # check FIB for new address
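+            # the delegated prefix 7:8::/56 supplies the high-order bits and
+            # the configured ::2:0:0:0:405/60 suffix the rest: 7:8:0:2::405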
+ new_addresses = self.get_addresses()
+ self.assertEqual(len(new_addresses), 1)
+ addr = list(new_addresses)[0]
+ self.assertEqual(addr, '7:8:0:2::405')
+
+ self.sleep(1)
+
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address2,
+ prefix_group=self.prefix_group)
+
+ self.sleep(1)
+
+ # check FIB contains 2 addresses
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg1))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 2)
+ addr1 = list(new_addresses)[0]
+ addr2 = list(new_addresses)[1]
+ if addr1 == '7:8:0:76::406':
+ addr1, addr2 = addr2, addr1
+ self.assertEqual(addr1, '7:8:0:2::405')
+ self.assertEqual(addr2, '7:8:0:76::406')
+
+ self.sleep(1)
+
+ # check that the addresses are deleted
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg1))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+ finally:
+ if address1 is not None:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address1,
+ prefix_group=self.prefix_group, is_add=0)
+ if address2 is not None:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address2,
+ prefix_group=self.prefix_group, is_add=0)
+
+ def test_sending_client_messages_solicit(self):
+ """ VPP receives messages from DHCPv6 client """
+
+ self.wait_for_solicit()
+ self.send_packet(DHCP6_Solicit)
+ self.send_packet(DHCP6_Request)
+ self.send_packet(DHCP6_Renew)
+ self.send_packet(DHCP6_Rebind)
+ self.sleep(1)
+ self.wait_for_solicit(is_resend=True)
+
+ def test_sending_inappropriate_packets(self):
+ """ Server sends messages with inappropriate message types """
+
+ self.wait_for_solicit()
+ self.send_reply()
+ self.wait_for_solicit(is_resend=True)
+ self.send_advertise()
+ self.wait_for_request()
+ self.send_advertise()
+ self.wait_for_request(is_resend=True)
+ self.send_reply()
+ self.wait_for_renew()
+
+ def test_no_prefix_available_in_advertise(self):
+ """ Advertise message contains NoPrefixAvail status code """
+
+ self.wait_for_solicit()
+ noavail = DHCP6OptStatusCode(statuscode=6) # NoPrefixAvail
+ self.send_advertise(iapdopt=noavail)
+ self.wait_for_solicit(is_resend=True)
+
+ def test_preferred_greater_than_valid_lifetime(self):
+ """ Preferred lifetime is greater than valid lifetime """
+
+ address1 = '::2:0:0:0:405/60'
+ try:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address1,
+ prefix_group=self.prefix_group)
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ ia_pd_opts = DHCP6OptIAPrefix(prefix='7:8::', plen=56, preflft=4,
+ validlft=3)
+ self.send_reply(iapdopt=ia_pd_opts)
+
+ self.sleep(0.5)
+
+ # check FIB contains no addresses
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg1))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+ finally:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address1,
+ prefix_group=self.prefix_group,
+ is_add=0)
+
+ def test_T1_greater_than_T2(self):
+ """ T1 is greater than T2 """
+
+ address1 = '::2:0:0:0:405/60'
+ try:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ address_with_prefix=address1,
+ prefix_group=self.prefix_group)
+
+ self.wait_for_solicit()
+ self.send_advertise()
+ self.wait_for_request()
+ ia_pd_opts = DHCP6OptIAPrefix(prefix='7:8::', plen=56, preflft=4,
+ validlft=8)
+ self.send_reply(t1=80, t2=40, iapdopt=ia_pd_opts)
+
+ self.sleep(0.5)
+
+ # check FIB contains no addresses
+ fib = self.vapi.ip_route_dump(0, True)
+ addresses = set(self.get_interface_addresses(fib, self.pg1))
+ new_addresses = addresses.difference(self.initial_addresses)
+ self.assertEqual(len(new_addresses), 0)
+
+ finally:
+ self.vapi.ip6_add_del_address_using_prefix(
+ sw_if_index=self.pg1.sw_if_index,
+ prefix_group=self.prefix_group,
+ address_with_prefix=address1,
+ is_add=False)
diff --git a/test/test_dns.py b/test/test_dns.py
new file mode 100644
index 00000000000..fb8958c511b
--- /dev/null
+++ b/test/test_dns.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+from ipaddress import IPv4Address
+
+import scapy.compat
+from scapy.contrib.mpls import MPLS
+from scapy.layers.inet import IP, UDP, TCP, ICMP, icmptypes, icmpcodes
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.dns import DNSRR, DNS, DNSQR
+
+
+class TestDns(VppTestCase):
+ """ Dns Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDns, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDns, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDns, self).setUp()
+
+ self.create_pg_interfaces(range(1))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ def tearDown(self):
+ super(TestDns, self).tearDown()
+
+ def create_stream(self, src_if):
+ """Create input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for.
+ """
+ good_request = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4) /
+ UDP(sport=1234, dport=53) /
+ DNS(rd=1, qd=DNSQR(qname="bozo.clown.org")))
+
+ bad_request = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4) /
+ UDP(sport=1234, dport=53) /
+ DNS(rd=1, qd=DNSQR(qname="no.clown.org")))
+ pkts = [good_request, bad_request]
+ return pkts
+
+ def verify_capture(self, dst_if, capture):
+ """Verify captured input packet stream for defined interface.
+
+ :param VppInterface dst_if: Interface to verify captured packet stream
+ for.
+ :param list capture: Captured packet stream.
+ """
+ self.logger.info("Verifying capture on interface %s" % dst_if.name)
+ for packet in capture:
+ dns = packet[DNS]
+ self.assertEqual(dns.an[0].rdata, '1.2.3.4')
+
+ def test_dns_unittest(self):
+ """ DNS Name Resolver Basic Functional Test """
+
+ # Set up an upstream name resolver. We won't actually go there
+ self.vapi.dns_name_server_add_del(
+ is_ip6=0, is_add=1, server_address=IPv4Address(u'8.8.8.8').packed)
+
+ # Enable name resolution
+ self.vapi.dns_enable_disable(enable=1)
+
+ # Manually add a static dns cache entry
+ self.logger.info(self.vapi.cli("dns cache add bozo.clown.org 1.2.3.4"))
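+        # (with the static entry in place, bozo.clown.org should resolve
+        # locally without contacting the upstream server)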
+
+ # Test the binary API
+ rv = self.vapi.dns_resolve_name(name=b'bozo.clown.org')
+ self.assertEqual(rv.ip4_address, IPv4Address(u'1.2.3.4').packed)
+
+ # Configure 127.0.0.1/8 on the pg interface
+ self.vapi.sw_interface_add_del_address(
+ sw_if_index=self.pg0.sw_if_index,
+ prefix="127.0.0.1/8")
+
+ # Send a couple of DNS request packets, one for bozo.clown.org
+ # and one for no.clown.org which won't resolve
+
+ pkts = self.create_stream(self.pg0)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+
+ self.pg_start()
+ pkts = self.pg0.get_capture(1)
+ self.verify_capture(self.pg0, pkts)
+
+ # Make sure that the cache contents are correct
+        cache_contents = self.vapi.cli("show dns cache verbose")
+        self.assertIn('1.2.3.4', cache_contents)
+        self.assertIn('[P] no.clown.org:', cache_contents)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_dslite.py b/test/test_dslite.py
new file mode 100644
index 00000000000..2b4f4aacc9f
--- /dev/null
+++ b/test/test_dslite.py
@@ -0,0 +1,341 @@
+#!/usr/bin/env python3
+
+import socket
+import unittest
+import struct
+import random
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+
+import scapy.compat
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply, \
+ ICMPv6ND_NS, ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, fragment6
+from scapy.layers.inet6 import ICMPv6DestUnreach, IPerror6, IPv6ExtHdrFragment
+from scapy.layers.l2 import Ether, ARP, GRE
+from scapy.data import IP_PROTOS
+from scapy.packet import bind_layers, Raw
+from util import ppp
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from time import sleep
+from util import ip4_range
+from vpp_papi import mac_pton
+from syslog_rfc5424_parser import SyslogMessage, ParseError
+from syslog_rfc5424_parser.constants import SyslogFacility, SyslogSeverity
+from io import BytesIO
+from vpp_papi import VppEnum
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathType
+from vpp_neighbor import VppNeighbor
+from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
+ IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
+ PacketListField
+from ipaddress import IPv6Network
+
+
+@tag_fixme_vpp_workers
+class TestDSlite(VppTestCase):
+ """ DS-Lite Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDSlite, cls).setUpClass()
+
+ try:
+ cls.nat_addr = '10.0.0.3'
+
+ cls.create_pg_interfaces(range(3))
+ cls.pg0.admin_up()
+ cls.pg0.config_ip4()
+ cls.pg0.resolve_arp()
+ cls.pg1.admin_up()
+ cls.pg1.config_ip6()
+ cls.pg1.generate_remote_hosts(2)
+ cls.pg1.configure_ipv6_neighbors()
+ cls.pg2.admin_up()
+ cls.pg2.config_ip4()
+ cls.pg2.resolve_arp()
+
+ except Exception:
+ super(TestDSlite, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDSlite, cls).tearDownClass()
+
+ def verify_syslog_apmadd(self, data, isaddr, isport, xsaddr, xsport,
+ sv6enc, proto):
+ message = data.decode('utf-8')
+ try:
+ message = SyslogMessage.parse(message)
+ except ParseError as e:
+ self.logger.error(e)
+ else:
+ self.assertEqual(message.severity, SyslogSeverity.info)
+ self.assertEqual(message.appname, 'NAT')
+ self.assertEqual(message.msgid, 'APMADD')
+ sd_params = message.sd.get('napmap')
+ self.assertTrue(sd_params is not None)
+ self.assertEqual(sd_params.get('IATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('ISADDR'), isaddr)
+ self.assertEqual(sd_params.get('ISPORT'), "%d" % isport)
+ self.assertEqual(sd_params.get('XATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('XSADDR'), xsaddr)
+ self.assertEqual(sd_params.get('XSPORT'), "%d" % xsport)
+ self.assertEqual(sd_params.get('PROTO'), "%d" % proto)
+ self.assertTrue(sd_params.get('SSUBIX') is not None)
+ self.assertEqual(sd_params.get('SV6ENC'), sv6enc)
+
+ def test_dslite(self):
+ """ Test DS-Lite """
+ nat_config = self.vapi.nat_show_config()
+ self.assertEqual(0, nat_config.dslite_ce)
+
+ self.vapi.dslite_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ is_add=1)
+ aftr_ip4 = '192.0.0.1'
+ aftr_ip6 = '2001:db8:85a3::8a2e:370:1'
+ self.vapi.dslite_set_aftr_addr(ip4_addr=aftr_ip4, ip6_addr=aftr_ip6)
+ self.vapi.syslog_set_sender(self.pg2.local_ip4, self.pg2.remote_ip4)
+
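+        # in-to-out: IPv4-in-IPv6 packets from the B4 on pg1 should be
+        # decapsulated and source-NATed to the pool address before leaving
+        # on pg0; the reverse direction is NATed back and re-encapsulated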
+ # UDP
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[0].ip6) /
+ IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
+ UDP(sport=20000, dport=10000))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ capture = capture[0]
+ self.assertFalse(capture.haslayer(IPv6))
+ self.assertEqual(capture[IP].src, self.nat_addr)
+ self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
+ self.assertNotEqual(capture[UDP].sport, 20000)
+ self.assertEqual(capture[UDP].dport, 10000)
+ self.assert_packet_checksums_valid(capture)
+ out_port = capture[UDP].sport
+ capture = self.pg2.get_capture(1)
+ self.verify_syslog_apmadd(capture[0][Raw].load, '192.168.1.1',
+ 20000, self.nat_addr, out_port,
+ self.pg1.remote_hosts[0].ip6, IP_PROTOS.udp)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
+ UDP(sport=10000, dport=out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, aftr_ip6)
+ self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[0].ip6)
+ self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(capture[IP].dst, '192.168.1.1')
+ self.assertEqual(capture[UDP].sport, 10000)
+ self.assertEqual(capture[UDP].dport, 20000)
+ self.assert_packet_checksums_valid(capture)
+
+ # TCP
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[1].ip6) /
+ IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
+ TCP(sport=20001, dport=10001))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ capture = capture[0]
+ self.assertFalse(capture.haslayer(IPv6))
+ self.assertEqual(capture[IP].src, self.nat_addr)
+ self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
+ self.assertNotEqual(capture[TCP].sport, 20001)
+ self.assertEqual(capture[TCP].dport, 10001)
+ self.assert_packet_checksums_valid(capture)
+ out_port = capture[TCP].sport
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
+ TCP(sport=10001, dport=out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, aftr_ip6)
+ self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
+ self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(capture[IP].dst, '192.168.1.1')
+ self.assertEqual(capture[TCP].sport, 10001)
+ self.assertEqual(capture[TCP].dport, 20001)
+ self.assert_packet_checksums_valid(capture)
+
+ # ICMP
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=aftr_ip6, src=self.pg1.remote_hosts[1].ip6) /
+ IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
+ ICMP(id=4000, type='echo-request'))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ capture = capture[0]
+ self.assertFalse(capture.haslayer(IPv6))
+ self.assertEqual(capture[IP].src, self.nat_addr)
+ self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
+ self.assertNotEqual(capture[ICMP].id, 4000)
+ self.assert_packet_checksums_valid(capture)
+ out_id = capture[ICMP].id
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(dst=self.nat_addr, src=self.pg0.remote_ip4) /
+ ICMP(id=out_id, type='echo-reply'))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, aftr_ip6)
+ self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
+ self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(capture[IP].dst, '192.168.1.1')
+ self.assertEqual(capture[ICMP].id, 4000)
+ self.assert_packet_checksums_valid(capture)
+
+ # ping DS-Lite AFTR tunnel endpoint address
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_hosts[1].ip6, dst=aftr_ip6) /
+ ICMPv6EchoRequest())
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, aftr_ip6)
+ self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[1].ip6)
+ self.assertTrue(capture.haslayer(ICMPv6EchoReply))
+
+ b4s = self.statistics.get_counter('/dslite/total-b4s')
+ self.assertEqual(b4s[0][0], 2)
+ sessions = self.statistics.get_counter('/dslite/total-sessions')
+ self.assertEqual(sessions[0][0], 3)
+
+ def tearDown(self):
+ super(TestDSlite, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show dslite pool"))
+ self.logger.info(
+ self.vapi.cli("show dslite aftr-tunnel-endpoint-address"))
+ self.logger.info(self.vapi.cli("show dslite sessions"))
+
+
+class TestDSliteCE(VppTestCase):
+ """ DS-Lite CE Test Cases """
+
+ @classmethod
+ def setUpConstants(cls):
+ super(TestDSliteCE, cls).setUpConstants()
+ cls.vpp_cmdline.extend(["dslite", "{", "ce", "}"])
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDSliteCE, cls).setUpClass()
+
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.pg0.admin_up()
+ cls.pg0.config_ip4()
+ cls.pg0.resolve_arp()
+ cls.pg1.admin_up()
+ cls.pg1.config_ip6()
+ cls.pg1.generate_remote_hosts(1)
+ cls.pg1.configure_ipv6_neighbors()
+
+ except Exception:
+ super(TestDSliteCE, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDSliteCE, cls).tearDownClass()
+
+ def test_dslite_ce(self):
+ """ Test DS-Lite CE """
+
+ # TODO: add message to retrieve dslite config
+ # nat_config = self.vapi.nat_show_config()
+ # self.assertEqual(1, nat_config.dslite_ce)
+
+ b4_ip4 = '192.0.0.2'
+ b4_ip6 = '2001:db8:62aa::375e:f4c1:1'
+ self.vapi.dslite_set_b4_addr(ip4_addr=b4_ip4, ip6_addr=b4_ip6)
+
+ aftr_ip4 = '192.0.0.1'
+ aftr_ip6 = '2001:db8:85a3::8a2e:370:1'
+ aftr_ip6_n = socket.inet_pton(socket.AF_INET6, aftr_ip6)
+ self.vapi.dslite_set_aftr_addr(ip4_addr=aftr_ip4, ip6_addr=aftr_ip6)
+
+ r1 = VppIpRoute(self, aftr_ip6, 128,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ r1.add_vpp_config()
+
+ # UDP encapsulation
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(dst=self.pg1.remote_ip4, src=self.pg0.remote_ip4) /
+ UDP(sport=10000, dport=20000))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, b4_ip6)
+ self.assertEqual(capture[IPv6].dst, aftr_ip6)
+ self.assertEqual(capture[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(capture[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(capture[UDP].sport, 10000)
+ self.assertEqual(capture[UDP].dport, 20000)
+ self.assert_packet_checksums_valid(capture)
+
+ # UDP decapsulation
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst=b4_ip6, src=aftr_ip6) /
+ IP(dst=self.pg0.remote_ip4, src=self.pg1.remote_ip4) /
+ UDP(sport=20000, dport=10000))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ capture = capture[0]
+ self.assertFalse(capture.haslayer(IPv6))
+ self.assertEqual(capture[IP].src, self.pg1.remote_ip4)
+ self.assertEqual(capture[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(capture[UDP].sport, 20000)
+ self.assertEqual(capture[UDP].dport, 10000)
+ self.assert_packet_checksums_valid(capture)
+
+ # ping DS-Lite B4 tunnel endpoint address
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_hosts[0].ip6, dst=b4_ip6) /
+ ICMPv6EchoRequest())
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ capture = capture[0]
+ self.assertEqual(capture[IPv6].src, b4_ip6)
+ self.assertEqual(capture[IPv6].dst, self.pg1.remote_hosts[0].ip6)
+ self.assertTrue(capture.haslayer(ICMPv6EchoReply))
+
+ def tearDown(self):
+ super(TestDSliteCE, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(
+ self.vapi.cli("show dslite aftr-tunnel-endpoint-address"))
+ self.logger.info(
+ self.vapi.cli("show dslite b4-tunnel-endpoint-address"))
diff --git a/test/test_dvr.py b/test/test_dvr.py
new file mode 100644
index 00000000000..8531b8553ca
--- /dev/null
+++ b/test/test_dvr.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python3
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathType
+from vpp_l2 import L2_PORT_TYPE
+from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
+from vpp_acl import AclRule, VppAcl, VppAclInterface
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet import IP, UDP
+from socket import AF_INET, inet_pton
+from ipaddress import IPv4Network
+
+NUM_PKTS = 67
+
+
+class TestDVR(VppTestCase):
+ """ Distributed Virtual Router """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestDVR, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestDVR, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestDVR, self).setUp()
+
+ self.create_pg_interfaces(range(4))
+ self.create_loopback_interfaces(1)
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+
+ self.loop0.config_ip4()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.admin_down()
+ self.loop0.unconfig_ip4()
+
+ super(TestDVR, self).tearDown()
+
+ def assert_same_mac_addr(self, tx, rx):
+ t_eth = tx[Ether]
+ for p in rx:
+ r_eth = p[Ether]
+ self.assertEqual(t_eth.src, r_eth.src)
+ self.assertEqual(t_eth.dst, r_eth.dst)
+
+ def assert_has_vlan_tag(self, tag, rx):
+ for p in rx:
+ r_1q = p[Dot1Q]
+ self.assertEqual(tag, r_1q.vlan)
+
+ def assert_has_no_tag(self, rx):
+ for p in rx:
+ self.assertFalse(p.haslayer(Dot1Q))
+
+ def test_dvr(self):
+ """ Distributed Virtual Router """
+
+ #
+ # A packet destined to an IP address that is L2 bridged via
+ # a non-tag interface
+ #
+ ip_non_tag_bridged = "10.10.10.10"
+ ip_tag_bridged = "10.10.10.11"
+ any_src_addr = "1.1.1.1"
+
+ pkt_no_tag = (Ether(src=self.pg0.remote_mac,
+ dst=self.loop0.local_mac) /
+ IP(src=any_src_addr,
+ dst=ip_non_tag_bridged) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_tag = (Ether(src=self.pg0.remote_mac,
+ dst=self.loop0.local_mac) /
+ IP(src=any_src_addr,
+ dst=ip_tag_bridged) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ #
+ # Two sub-interfaces so we can test VLAN tag push/pop
+ #
+ sub_if_on_pg2 = VppDot1QSubint(self, self.pg2, 92)
+ sub_if_on_pg3 = VppDot1QSubint(self, self.pg3, 93)
+ sub_if_on_pg2.admin_up()
+ sub_if_on_pg3.admin_up()
+
+ #
+ # Put all the interfaces into a new bridge domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg0.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.loop0.sw_if_index, bd_id=1,
+ port_type=L2_PORT_TYPE.BVI)
+
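+        # pop the VLAN tag on rx from each sub-interface and push the
+        # corresponding tag back on tx, so frames are untagged inside the
+        # bridge domain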
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=sub_if_on_pg2.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=92)
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=sub_if_on_pg3.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=93)
+
+ #
+        # Add routes to bridge the traffic via a tagged and a non-tagged interface
+ #
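+        # a DVR path resolves via the egress interface at L2, so the packet
+        # should keep its original MAC header instead of being routed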
+ route_no_tag = VppIpRoute(
+ self, ip_non_tag_bridged, 32,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
+ route_no_tag.add_vpp_config()
+
+ #
+ # Inject the packet that arrives and leaves on a non-tagged interface
+        # Since it's 'bridged' expect that the MAC header is unchanged.
+ #
+ rx = self.send_and_expect(self.pg0, pkt_no_tag * NUM_PKTS, self.pg1)
+ self.assert_same_mac_addr(pkt_no_tag, rx)
+ self.assert_has_no_tag(rx)
+
+ #
+ # Add routes to bridge the traffic via a tagged interface
+ #
+ route_with_tag = VppIpRoute(
+ self, ip_tag_bridged, 32,
+ [VppRoutePath("0.0.0.0",
+ sub_if_on_pg3.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
+ route_with_tag.add_vpp_config()
+
+ #
+ # Inject the packet that arrives non-tag and leaves on a tagged
+ # interface
+ #
+ rx = self.send_and_expect(self.pg0, pkt_tag * NUM_PKTS, self.pg3)
+ self.assert_same_mac_addr(pkt_tag, rx)
+ self.assert_has_vlan_tag(93, rx)
+
+ #
+ # Tag to tag
+ #
+ pkt_tag_to_tag = (Ether(src=self.pg2.remote_mac,
+ dst=self.loop0.local_mac) /
+ Dot1Q(vlan=92) /
+ IP(src=any_src_addr,
+ dst=ip_tag_bridged) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2,
+ pkt_tag_to_tag * NUM_PKTS,
+ self.pg3)
+ self.assert_same_mac_addr(pkt_tag_to_tag, rx)
+ self.assert_has_vlan_tag(93, rx)
+
+ #
+ # Tag to non-Tag
+ #
+ pkt_tag_to_non_tag = (Ether(src=self.pg2.remote_mac,
+ dst=self.loop0.local_mac) /
+ Dot1Q(vlan=92) /
+ IP(src=any_src_addr,
+ dst=ip_non_tag_bridged) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2,
+ pkt_tag_to_non_tag * NUM_PKTS,
+ self.pg1)
+        self.assert_same_mac_addr(pkt_tag_to_non_tag, rx)
+ self.assert_has_no_tag(rx)
+
+ #
+ # Add an output L3 ACL that will block the traffic
+ #
+ rule_1 = AclRule(is_permit=0, proto=17, ports=1234,
+ src_prefix=IPv4Network((any_src_addr, 32)),
+ dst_prefix=IPv4Network((ip_non_tag_bridged, 32)))
+ acl = VppAcl(self, rules=[rule_1])
+ acl.add_vpp_config()
+
+ #
+ # Apply the ACL on the output interface
+ #
+ acl_if1 = VppAclInterface(self, sw_if_index=self.pg1.sw_if_index,
+ n_input=0, acls=[acl])
+ acl_if1.add_vpp_config()
+
+ #
+        # Send packets that should match the ACL and be dropped
+ #
+ rx = self.send_and_assert_no_replies(self.pg2,
+ pkt_tag_to_non_tag * NUM_PKTS)
+
+ #
+ # cleanup
+ #
+ acl_if1.remove_vpp_config()
+ acl.remove_vpp_config()
+
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg0.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.loop0.sw_if_index, bd_id=1,
+ port_type=L2_PORT_TYPE.BVI, enable=0)
+
+ #
+ # Do a FIB dump to make sure the paths are correctly reported as DVR
+ #
+ routes = self.vapi.ip_route_dump(0)
+
+ for r in routes:
+ if (ip_tag_bridged == str(r.route.prefix.network_address)):
+ self.assertEqual(r.route.paths[0].sw_if_index,
+ sub_if_on_pg3.sw_if_index)
+ self.assertEqual(r.route.paths[0].type,
+ FibPathType.FIB_PATH_TYPE_DVR)
+ if (ip_non_tag_bridged == str(r.route.prefix.network_address)):
+ self.assertEqual(r.route.paths[0].sw_if_index,
+ self.pg1.sw_if_index)
+ self.assertEqual(r.route.paths[0].type,
+ FibPathType.FIB_PATH_TYPE_DVR)
+
+ #
+        # the explicit route delete is required so that it happens before
+        # the sub-interface delete. The sub-interface delete is required
+        # because that object type does not use the object registry
+ #
+ route_no_tag.remove_vpp_config()
+ route_with_tag.remove_vpp_config()
+ sub_if_on_pg3.remove_vpp_config()
+ sub_if_on_pg2.remove_vpp_config()
+
+ def test_l2_emulation(self):
+ """ L2 Emulation """
+
+ #
+ # non distinct L3 packets, in the tag/non-tag combos
+ #
+ pkt_no_tag = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IP(src="2.2.2.2",
+ dst="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_to_tag = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ IP(src="2.2.2.2",
+ dst="1.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_from_tag = (Ether(src=self.pg3.remote_mac,
+ dst=self.pg2.remote_mac) /
+ Dot1Q(vlan=93) /
+ IP(src="2.2.2.2",
+ dst="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_from_to_tag = (Ether(src=self.pg3.remote_mac,
+ dst=self.pg2.remote_mac) /
+ Dot1Q(vlan=93) /
+ IP(src="2.2.2.2",
+ dst="1.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_bcast = (Ether(src=self.pg0.remote_mac,
+ dst="ff:ff:ff:ff:ff:ff") /
+ IP(src="2.2.2.2",
+ dst="255.255.255.255") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ #
+ # A couple of sub-interfaces for tags
+ #
+ sub_if_on_pg2 = VppDot1QSubint(self, self.pg2, 92)
+ sub_if_on_pg3 = VppDot1QSubint(self, self.pg3, 93)
+ sub_if_on_pg2.admin_up()
+ sub_if_on_pg3.admin_up()
+
+ #
+ # Put all the interfaces into a new bridge domain
+ #
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg0.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1)
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=sub_if_on_pg2.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=92)
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=sub_if_on_pg3.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=93)
+
+ #
+        # Disable UU flooding, learning and ARP termination. This makes the
+        # test easier, as unicast packets are dropped if not extracted.
+ #
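+        # flag bits assumed here: (1 << 0) learn, (1 << 3) uu-flood,
+        # (1 << 4) arp-term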
+ self.vapi.bridge_flags(bd_id=1, is_set=0,
+ flags=(1 << 0) | (1 << 3) | (1 << 4))
+
+ #
+ # Add a DVR route to steer traffic at L3
+ #
+ route_1 = VppIpRoute(
+ self, "1.1.1.1", 32,
+ [VppRoutePath("0.0.0.0",
+ self.pg1.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
+ route_2 = VppIpRoute(
+ self, "1.1.1.2", 32,
+ [VppRoutePath("0.0.0.0",
+ sub_if_on_pg2.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR)])
+ route_1.add_vpp_config()
+ route_2.add_vpp_config()
+
+ #
+ # packets are dropped because bridge does not flood unknown unicast
+ #
+ self.send_and_assert_no_replies(self.pg0, pkt_no_tag)
+
+ #
+ # Enable L3 extraction on pgs
+ #
+ self.vapi.l2_emulation(self.pg0.sw_if_index)
+ self.vapi.l2_emulation(self.pg1.sw_if_index)
+ self.vapi.l2_emulation(sub_if_on_pg2.sw_if_index)
+ self.vapi.l2_emulation(sub_if_on_pg3.sw_if_index)
+
+ #
+        # now we expect the packets to be forwarded according to the DVR route
+ #
+ rx = self.send_and_expect(self.pg0, pkt_no_tag * NUM_PKTS, self.pg1)
+ self.assert_same_mac_addr(pkt_no_tag, rx)
+ self.assert_has_no_tag(rx)
+
+ rx = self.send_and_expect(self.pg0, pkt_to_tag * NUM_PKTS, self.pg2)
+ self.assert_same_mac_addr(pkt_to_tag, rx)
+ self.assert_has_vlan_tag(92, rx)
+
+ rx = self.send_and_expect(self.pg3, pkt_from_tag * NUM_PKTS, self.pg1)
+ self.assert_same_mac_addr(pkt_from_tag, rx)
+ self.assert_has_no_tag(rx)
+
+ rx = self.send_and_expect(self.pg3,
+ pkt_from_to_tag * NUM_PKTS,
+ self.pg2)
+        self.assert_same_mac_addr(pkt_from_to_tag, rx)
+ self.assert_has_vlan_tag(92, rx)
+
+ #
+ # but broadcast packets are still flooded
+ #
+ self.send_and_expect(self.pg0, pkt_bcast * 33, self.pg2)
+
+ #
+ # cleanup
+ #
+ self.vapi.l2_emulation(self.pg0.sw_if_index,
+ enable=0)
+ self.vapi.l2_emulation(self.pg1.sw_if_index,
+ enable=0)
+ self.vapi.l2_emulation(sub_if_on_pg2.sw_if_index,
+ enable=0)
+ self.vapi.l2_emulation(sub_if_on_pg3.sw_if_index,
+ enable=0)
+
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg0.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg2.sw_if_index, bd_id=1, enable=0)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=sub_if_on_pg3.sw_if_index, bd_id=1, enable=0)
+
+ route_1.remove_vpp_config()
+ route_2.remove_vpp_config()
+ sub_if_on_pg3.remove_vpp_config()
+ sub_if_on_pg2.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_endian.py b/test/test_endian.py
new file mode 100644
index 00000000000..462ee2b6b57
--- /dev/null
+++ b/test/test_endian.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2019. Vinci Consulting Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import framework
+import vpp_papi_provider
+
+F64_ONE = 1.0
+
+
+class TestEndian(framework.VppTestCase):
+ """TestEndian"""
+
+ def test_f64_endian_value(self):
+ try:
+ rv = self.vapi.get_f64_endian_value(f64_one=F64_ONE)
+ self.assertEqual(rv.f64_one_result, F64_ONE,
+ "client incorrectly deserializes f64 values. "
+ "Expected: %r. Received: %r." % (
+ F64_ONE, rv.f64_one_result))
+ except vpp_papi_provider.UnexpectedApiReturnValueError:
+ self.fail('client incorrectly serializes f64 values.')
+
+ def test_get_f64_increment_by_one(self):
+ expected = 43.0
+ rv = self.vapi.get_f64_increment_by_one(f64_value=42.0)
+ self.assertEqual(rv.f64_value, expected, 'Expected %r, received:%r.'
+ % (expected, rv.f64_value))
diff --git a/test/test_fib.py b/test/test_fib.py
new file mode 100644
index 00000000000..7c08722d803
--- /dev/null
+++ b/test/test_fib.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+
+
+@tag_fixme_vpp_workers
+class TestFIB(VppTestCase):
+ """ FIB Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestFIB, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestFIB, cls).tearDownClass()
+
+ def test_fib(self):
+ """ FIB Unit Tests """
+ error = self.vapi.cli("test fib")
+
+ # shameless test of CLIs to bump lcov results...
+        # no, I mean to ensure they don't crash
+ self.logger.info(self.vapi.cli("sh fib source"))
+ self.logger.info(self.vapi.cli("sh fib source prio"))
+ self.logger.info(self.vapi.cli("sh fib memory"))
+ self.logger.info(self.vapi.cli("sh fib entry"))
+ self.logger.info(self.vapi.cli("sh fib entry 0"))
+ self.logger.info(self.vapi.cli("sh fib entry 10000"))
+ self.logger.info(self.vapi.cli("sh fib entry-delegate"))
+ self.logger.info(self.vapi.cli("sh fib paths"))
+ self.logger.info(self.vapi.cli("sh fib paths 0"))
+ self.logger.info(self.vapi.cli("sh fib paths 10000"))
+ self.logger.info(self.vapi.cli("sh fib path-list"))
+ self.logger.info(self.vapi.cli("sh fib path-list 0"))
+ self.logger.info(self.vapi.cli("sh fib path-list 10000"))
+ self.logger.info(self.vapi.cli("sh fib walk"))
+ self.logger.info(self.vapi.cli("sh fib uRPF"))
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("Failed", error)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_flowprobe.py b/test/test_flowprobe.py
new file mode 100644
index 00000000000..517729d8591
--- /dev/null
+++ b/test/test_flowprobe.py
@@ -0,0 +1,1094 @@
+#!/usr/bin/env python3
+from __future__ import print_function
+import binascii
+import random
+import socket
+import unittest
+import time
+import re
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, TCP, UDP
+from scapy.layers.inet6 import IPv6
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from framework import tag_run_solo
+from vpp_object import VppObject
+from vpp_pg_interface import CaptureTimeoutError
+from util import ppp
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_papi.macaddress import mac_ntop
+from socket import inet_ntop
+from vpp_papi import VppEnum
+
+
+class VppCFLOW(VppObject):
+ """CFLOW object for IPFIX exporter and Flowprobe feature"""
+
+ def __init__(self, test, intf='pg2', active=0, passive=0, timeout=100,
+ mtu=1024, datapath='l2', layer='l2 l3 l4'):
+ self._test = test
+ self._intf = intf
+ self._active = active
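+ # the passive timer must be longer than the active timer; bump it if needed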
+ if passive == 0 or passive < active:
+ self._passive = active+1
+ else:
+ self._passive = passive
+ self._datapath = datapath # l2 ip4 ip6
+ self._collect = layer # l2 l3 l4
+ self._timeout = timeout
+ self._mtu = mtu
+ self._configured = False
+
+ def add_vpp_config(self):
+ self.enable_exporter()
+ l2_flag = 0
+ l3_flag = 0
+ l4_flag = 0
+ if 'l2' in self._collect.lower():
+ l2_flag = (VppEnum.vl_api_flowprobe_record_flags_t.
+ FLOWPROBE_RECORD_FLAG_L2)
+ if 'l3' in self._collect.lower():
+ l3_flag = (VppEnum.vl_api_flowprobe_record_flags_t.
+ FLOWPROBE_RECORD_FLAG_L3)
+ if 'l4' in self._collect.lower():
+ l4_flag = (VppEnum.vl_api_flowprobe_record_flags_t.
+ FLOWPROBE_RECORD_FLAG_L4)
+ self._test.vapi.flowprobe_params(
+ record_flags=(l2_flag | l3_flag | l4_flag),
+ active_timer=self._active, passive_timer=self._passive)
+ self.enable_flowprobe_feature()
+ self._test.vapi.cli("ipfix flush")
+ self._configured = True
+
+ def remove_vpp_config(self):
+ self.disable_exporter()
+ self.disable_flowprobe_feature()
+ self._test.vapi.cli("ipfix flush")
+ self._configured = False
+
+ def enable_exporter(self):
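+ # the remote host on pg0 acts as the IPFIX collector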
+ self._test.vapi.set_ipfix_exporter(
+ collector_address=self._test.pg0.remote_ip4,
+ src_address=self._test.pg0.local_ip4,
+ path_mtu=self._mtu,
+ template_interval=self._timeout)
+
+ def enable_flowprobe_feature(self):
+ self._test.vapi.ppcli("flowprobe feature add-del %s %s" %
+ (self._intf, self._datapath))
+
+ def disable_exporter(self):
+ self._test.vapi.cli("set ipfix exporter collector 0.0.0.0")
+
+ def disable_flowprobe_feature(self):
+ self._test.vapi.cli("flowprobe feature add-del %s %s disable" %
+ (self._intf, self._datapath))
+
+ def object_id(self):
+ return "ipfix-collector-%s-%s" % (self._src, self.dst)
+
+ def query_vpp_config(self):
+ return self._configured
+
+ def verify_templates(self, decoder=None, timeout=1, count=3):
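+ # wait for 'count' template packets and, if a decoder is given,
+ # register each received template with it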
+ templates = []
+ p = self._test.wait_for_cflow_packet(self._test.collector, 2, timeout)
+ self._test.assertTrue(p.haslayer(IPFIX))
+ if decoder is not None and p.haslayer(Template):
+ templates.append(p[Template].templateID)
+ decoder.add_template(p.getlayer(Template))
+ if count > 1:
+ p = self._test.wait_for_cflow_packet(self._test.collector, 2)
+ self._test.assertTrue(p.haslayer(IPFIX))
+ if decoder is not None and p.haslayer(Template):
+ templates.append(p[Template].templateID)
+ decoder.add_template(p.getlayer(Template))
+ if count > 2:
+ p = self._test.wait_for_cflow_packet(self._test.collector, 2)
+ self._test.assertTrue(p.haslayer(IPFIX))
+ if decoder is not None and p.haslayer(Template):
+ templates.append(p[Template].templateID)
+ decoder.add_template(p.getlayer(Template))
+ return templates
+
+
+class MethodHolder(VppTestCase):
+ """ Flow-per-packet plugin: test L2, IP4, IP6 reporting """
+
+ # Test variables
+ debug_print = False
+ max_number_of_packets = 10
+ pkts = []
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(MethodHolder, cls).setUpClass()
+ try:
+ # Create pg interfaces
+ cls.create_pg_interfaces(range(9))
+
+ # Packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+
+ # Create BD with MAC learning and unknown unicast flooding enabled
+ # and put interfaces into this BD
+ cls.vapi.bridge_domain_add_del(bd_id=1, uu_flood=1, learn=1)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1._sw_if_index, bd_id=1)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg2._sw_if_index, bd_id=1)
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ cls.pg0.config_ip4()
+ cls.pg0.configure_ipv4_neighbors()
+ cls.collector = cls.pg0
+
+ cls.pg1.config_ip4()
+ cls.pg1.resolve_arp()
+ cls.pg2.config_ip4()
+ cls.pg2.resolve_arp()
+ cls.pg3.config_ip4()
+ cls.pg3.resolve_arp()
+ cls.pg4.config_ip4()
+ cls.pg4.resolve_arp()
+ cls.pg7.config_ip4()
+ cls.pg8.config_ip4()
+ cls.pg8.configure_ipv4_neighbors()
+
+ cls.pg5.config_ip6()
+ cls.pg5.resolve_ndp()
+ cls.pg5.disable_ipv6_ra()
+ cls.pg6.config_ip6()
+ cls.pg6.resolve_ndp()
+ cls.pg6.disable_ipv6_ra()
+ except Exception:
+ super(MethodHolder, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(MethodHolder, cls).tearDownClass()
+
+ def create_stream(self, src_if=None, dst_if=None, packets=None,
+ size=None, ip_ver='v4'):
+ """Create a packet stream to tickle the plugin
+
+ :param VppInterface src_if: Source interface for packet stream
+ :param VppInterface dst_if: Destination interface for packet stream
+ """
+ if src_if is None:
+ src_if = self.pg1
+ if dst_if is None:
+ dst_if = self.pg2
+ self.pkts = []
+ if packets is None:
+ packets = random.randint(1, self.max_number_of_packets)
+ pkt_size = size
+ for p in range(0, packets):
+ if size is None:
+ pkt_size = random.choice(self.pg_if_packet_sizes)
+ info = self.create_packet_info(src_if, dst_if)
+ payload = self.info_to_payload(info)
+ p = Ether(src=src_if.remote_mac, dst=src_if.local_mac)
+ if ip_ver == 'v4':
+ p /= IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4)
+ else:
+ p /= IPv6(src=src_if.remote_ip6, dst=dst_if.remote_ip6)
+ p /= UDP(sport=1234, dport=4321)
+ p /= Raw(payload)
+ info.data = p.copy()
+ self.extend_packet(p, pkt_size)
+ self.pkts.append(p)
+
+ def verify_cflow_data(self, decoder, capture, cflow):
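+ # total the octets and packets seen in the capture and compare them
+ # with the octet/packet delta counts reported in the cflow record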
+ octets = 0
+ packets = 0
+ for p in capture:
+ octets += p[IP].len
+ packets += 1
+ if cflow.haslayer(Data):
+ data = decoder.decode_data_set(cflow.getlayer(Set))
+ for record in data:
+ self.assertEqual(int(binascii.hexlify(record[1]), 16), octets)
+ self.assertEqual(int(binascii.hexlify(record[2]), 16), packets)
+
+ def send_packets(self, src_if=None, dst_if=None):
+ if src_if is None:
+ src_if = self.pg1
+ if dst_if is None:
+ dst_if = self.pg2
+ self.pg_enable_capture([dst_if])
+ src_if.add_stream(self.pkts)
+ self.pg_start()
+ return dst_if.get_capture(len(self.pkts))
+
+ def verify_cflow_data_detail(self, decoder, capture, cflow,
+ data_set={1: 'octets', 2: 'packets'},
+ ip_ver='v4'):
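+ # data_set maps IPFIX information element ids to expected values,
+ # e.g. 1 octetDeltaCount, 2 packetDeltaCount, 8/12 IPv4 src/dst address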
+ if self.debug_print:
+ print(capture[0].show())
+ if cflow.haslayer(Data):
+ data = decoder.decode_data_set(cflow.getlayer(Set))
+ if self.debug_print:
+ print(data)
+ if ip_ver == 'v4':
+ ip_layer = capture[0][IP]
+ else:
+ ip_layer = capture[0][IPv6]
+ if data_set is not None:
+ for record in data:
+ # skip flow if ingress/egress interface is 0
+ if int(binascii.hexlify(record[10]), 16) == 0:
+ continue
+ if int(binascii.hexlify(record[14]), 16) == 0:
+ continue
+
+ for field in data_set:
+ if field not in record.keys():
+ continue
+ value = data_set[field]
+ if value == 'octets':
+ value = ip_layer.len
+ if ip_ver == 'v6':
+ value += 40  # account for the 40-byte IPv6 header not included in 'len'
+ elif value == 'packets':
+ value = 1
+ elif value == 'src_ip':
+ if ip_ver == 'v4':
+ ip = socket.inet_pton(socket.AF_INET,
+ ip_layer.src)
+ else:
+ ip = socket.inet_pton(socket.AF_INET6,
+ ip_layer.src)
+ value = int(binascii.hexlify(ip), 16)
+ elif value == 'dst_ip':
+ if ip_ver == 'v4':
+ ip = socket.inet_pton(socket.AF_INET,
+ ip_layer.dst)
+ else:
+ ip = socket.inet_pton(socket.AF_INET6,
+ ip_layer.dst)
+ value = int(binascii.hexlify(ip), 16)
+ elif value == 'sport':
+ value = int(capture[0][UDP].sport)
+ elif value == 'dport':
+ value = int(capture[0][UDP].dport)
+ self.assertEqual(int(binascii.hexlify(
+ record[field]), 16),
+ value)
+
+ def verify_cflow_data_notimer(self, decoder, capture, cflows):
+ idx = 0
+ for cflow in cflows:
+ if cflow.haslayer(Data):
+ data = decoder.decode_data_set(cflow.getlayer(Set))
+ else:
+ raise Exception("No CFLOW data")
+
+ for rec in data:
+ p = capture[idx]
+ idx += 1
+ self.assertEqual(p[IP].len, int(
+ binascii.hexlify(rec[1]), 16))
+ self.assertEqual(1, int(
+ binascii.hexlify(rec[2]), 16))
+ self.assertEqual(len(capture), idx)
+
+ def wait_for_cflow_packet(self, collector_intf, set_id=2, timeout=1,
+ expected=True):
+ """ wait for CFLOW packet and verify its correctness
+
+ :param timeout: how long to wait
+
+ :returns: the received CFLOW packet (None if no packet was expected)
+ """
+ self.logger.info("IPFIX: Waiting for CFLOW packet")
+ deadline = time.time() + timeout
+ counter = 0
+ # self.logger.debug(self.vapi.ppcli("show flow table"))
+ while True:
+ counter += 1
+ # sanity check
+ self.assert_in_range(counter, 0, 100, "number of packets ignored")
+ time_left = deadline - time.time()
+ try:
+ if time_left < 0 and expected:
+ # self.logger.debug(self.vapi.ppcli("show flow table"))
+ raise CaptureTimeoutError(
+ "Packet did not arrive within timeout")
+ p = collector_intf.wait_for_packet(timeout=time_left)
+ except CaptureTimeoutError:
+ if expected:
+ # self.logger.debug(self.vapi.ppcli("show flow table"))
+ raise CaptureTimeoutError(
+ "Packet did not arrive within timeout")
+ else:
+ return
+ if not expected:
+ raise CaptureTimeoutError("Packet arrived even not expected")
+ self.assertEqual(p[Set].setID, set_id)
+ # self.logger.debug(self.vapi.ppcli("show flow table"))
+ self.logger.debug(ppp("IPFIX: Got packet:", p))
+ break
+ return p
+
+
+@tag_run_solo
+@tag_fixme_vpp_workers
+class Flowprobe(MethodHolder):
+ """Template verification, timer tests"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(Flowprobe, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(Flowprobe, cls).tearDownClass()
+
+ def test_0001(self):
+ """ timer less than template timeout"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, active=2)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream(packets=1)
+ self.send_packets()
+ capture = self.pg2.get_capture(1)
+
+ # make sure the one packet we expect actually showed up
+ cflow = self.wait_for_cflow_packet(self.collector, templates[1], 15)
+ self.verify_cflow_data(ipfix_decoder, capture, cflow)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+ def test_0002(self):
+ """ timer greater than template timeout"""
+ self.logger.info("FFP_TEST_START_0002")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, timeout=3, active=4)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ ipfix.verify_templates()
+
+ self.create_stream(packets=2)
+ self.send_packets()
+ capture = self.pg2.get_capture(2)
+
+ # the next set of template packets should arrive once the
+ # template interval (3 s here) expires
+ templates = ipfix.verify_templates(ipfix_decoder, timeout=5)
+
+ # make sure the one packet we expect actually showed up
+ cflow = self.wait_for_cflow_packet(self.collector, templates[1], 15)
+ self.verify_cflow_data(ipfix_decoder, capture, cflow)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0002")
+
+ def test_cflow_packet(self):
+ """verify cflow packet fields"""
+ self.logger.info("FFP_TEST_START_0000")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg8', datapath="ip4",
+ layer='l2 l3 l4', active=2)
+ ipfix.add_vpp_config()
+
+ route_9001 = VppIpRoute(self, "9.0.0.0", 24,
+ [VppRoutePath(self.pg8._remote_hosts[0].ip4,
+ self.pg8.sw_if_index)])
+ route_9001.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.pkts = [(Ether(dst=self.pg7.local_mac,
+ src=self.pg7.remote_mac) /
+ IP(src=self.pg7.remote_ip4, dst="9.0.0.100") /
+ TCP(sport=1234, dport=4321, flags=80) /
+ Raw(b'\xa5' * 100))]
+
+ nowUTC = int(time.time())
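+ # IPFIX timestamps use the NTP epoch; 2208988800 s separate 1900 from 1970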
+ nowUNIX = nowUTC+2208988800
+ self.send_packets(src_if=self.pg7, dst_if=self.pg8)
+
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0], 10)
+ self.collector.get_capture(2)
+
+ if cflow[0].haslayer(IPFIX):
+ self.assertEqual(cflow[IPFIX].version, 10)
+ self.assertEqual(cflow[IPFIX].observationDomainID, 1)
+ self.assertEqual(cflow[IPFIX].sequenceNumber, 0)
+ self.assertAlmostEqual(cflow[IPFIX].exportTime, nowUTC, delta=5)
+ if cflow.haslayer(Data):
+ record = ipfix_decoder.decode_data_set(cflow[0].getlayer(Set))[0]
+ # ingress interface
+ self.assertEqual(int(binascii.hexlify(record[10]), 16), 8)
+ # egress interface
+ self.assertEqual(int(binascii.hexlify(record[14]), 16), 9)
+ # packets
+ self.assertEqual(int(binascii.hexlify(record[2]), 16), 1)
+ # src mac
+ self.assertEqual(mac_ntop(record[56]), self.pg8.local_mac)
+ # dst mac
+ self.assertEqual(mac_ntop(record[80]), self.pg8.remote_mac)
+ flowTimestamp = int(binascii.hexlify(record[156]), 16) >> 32
+ # flow start timestamp
+ self.assertAlmostEqual(flowTimestamp, nowUNIX, delta=1)
+ flowTimestamp = int(binascii.hexlify(record[157]), 16) >> 32
+ # flow end timestamp
+ self.assertAlmostEqual(flowTimestamp, nowUNIX, delta=1)
+ # ethernet type
+ self.assertEqual(int(binascii.hexlify(record[256]), 16), 8)
+ # src ip
+ self.assertEqual(inet_ntop(socket.AF_INET, record[8]),
+ self.pg7.remote_ip4)
+ # dst ip
+ self.assertEqual(inet_ntop(socket.AF_INET, record[12]),
+ "9.0.0.100")
+ # protocol (TCP)
+ self.assertEqual(int(binascii.hexlify(record[4]), 16), 6)
+ # src port
+ self.assertEqual(int(binascii.hexlify(record[7]), 16), 1234)
+ # dst port
+ self.assertEqual(int(binascii.hexlify(record[11]), 16), 4321)
+ # tcp flags
+ self.assertEqual(int(binascii.hexlify(record[6]), 16), 80)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0000")
+
+
+@tag_fixme_vpp_workers
+class Datapath(MethodHolder):
+ """collect information on Ethernet, IP4 and IP6 datapath (no timers)"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(Datapath, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(Datapath, cls).tearDownClass()
+
+ def test_templatesL2(self):
+ """ verify template on L2 datapath"""
+ self.logger.info("FFP_TEST_START_0000")
+ self.pg_enable_capture(self.pg_interfaces)
+
+ ipfix = VppCFLOW(test=self, layer='l2')
+ ipfix.add_vpp_config()
+
+ # template packet should arrive immediately
+ self.vapi.ipfix_flush()
+ ipfix.verify_templates(timeout=3, count=1)
+ self.collector.get_capture(1)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0000")
+
+ def test_L2onL2(self):
+ """ L2 data on L2 datapath"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, layer='l2')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(packets=1)
+ capture = self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 256: 8})
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+ def test_L3onL2(self):
+ """ L3 data on L2 datapath"""
+ self.logger.info("FFP_TEST_START_0002")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, layer='l3')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=2)
+
+ self.create_stream(packets=1)
+ capture = self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 4: 17,
+ 8: 'src_ip', 12: 'dst_ip'})
+
+ self.collector.get_capture(3)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0002")
+
+ def test_L4onL2(self):
+ """ L4 data on L2 datapath"""
+ self.logger.info("FFP_TEST_START_0003")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, layer='l4')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=2)
+
+ self.create_stream(packets=1)
+ capture = self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 7: 'sport', 11: 'dport'})
+
+ self.collector.get_capture(3)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0003")
+
+ def test_templatesIp4(self):
+ """ verify templates on IP4 datapath"""
+ self.logger.info("FFP_TEST_START_0000")
+
+ self.pg_enable_capture(self.pg_interfaces)
+
+ ipfix = VppCFLOW(test=self, datapath='ip4')
+ ipfix.add_vpp_config()
+
+ # template packet should arrive immediately
+ self.vapi.ipfix_flush()
+ ipfix.verify_templates(timeout=3, count=1)
+ self.collector.get_capture(1)
+
+ ipfix.remove_vpp_config()
+
+ self.logger.info("FFP_TEST_FINISH_0000")
+
+ def test_L2onIP4(self):
+ """ L2 data on IP4 datapath"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg4', layer='l2', datapath='ip4')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg3, dst_if=self.pg4, packets=1)
+ capture = self.send_packets(src_if=self.pg3, dst_if=self.pg4)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 256: 8})
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+ def test_L3onIP4(self):
+ """ L3 data on IP4 datapath"""
+ self.logger.info("FFP_TEST_START_0002")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg4', layer='l3', datapath='ip4')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg3, dst_if=self.pg4, packets=1)
+ capture = self.send_packets(src_if=self.pg3, dst_if=self.pg4)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {1: 'octets', 2: 'packets',
+ 8: 'src_ip', 12: 'dst_ip'})
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0002")
+
+ def test_L4onIP4(self):
+ """ L4 data on IP4 datapath"""
+ self.logger.info("FFP_TEST_START_0003")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg4', layer='l4', datapath='ip4')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg3, dst_if=self.pg4, packets=1)
+ capture = self.send_packets(src_if=self.pg3, dst_if=self.pg4)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 7: 'sport', 11: 'dport'})
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0003")
+
+ def test_templatesIP6(self):
+ """ verify templates on IP6 datapath"""
+ self.logger.info("FFP_TEST_START_0000")
+ self.pg_enable_capture(self.pg_interfaces)
+
+ ipfix = VppCFLOW(test=self, datapath='ip6')
+ ipfix.add_vpp_config()
+
+ # template packet should arrive immediately
+ ipfix.verify_templates(count=1)
+ self.collector.get_capture(1)
+
+ ipfix.remove_vpp_config()
+
+ self.logger.info("FFP_TEST_FINISH_0000")
+
+ def test_L2onIP6(self):
+ """ L2 data on IP6 datapath"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg6', layer='l2', datapath='ip6')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg5, dst_if=self.pg6, packets=1,
+ ip_ver='IPv6')
+ capture = self.send_packets(src_if=self.pg5, dst_if=self.pg6)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 256: 56710},
+ ip_ver='v6')
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+ def test_L3onIP6(self):
+ """ L3 data on IP6 datapath"""
+ self.logger.info("FFP_TEST_START_0002")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg6', layer='l3', datapath='ip6')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg5, dst_if=self.pg6, packets=1,
+ ip_ver='IPv6')
+ capture = self.send_packets(src_if=self.pg5, dst_if=self.pg6)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets',
+ 27: 'src_ip', 28: 'dst_ip'},
+ ip_ver='v6')
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0002")
+
+ def test_L4onIP6(self):
+ """ L4 data on IP6 datapath"""
+ self.logger.info("FFP_TEST_START_0003")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, intf='pg6', layer='l4', datapath='ip6')
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder, count=1)
+
+ self.create_stream(src_if=self.pg5, dst_if=self.pg6, packets=1,
+ ip_ver='IPv6')
+ capture = self.send_packets(src_if=self.pg5, dst_if=self.pg6)
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[0])
+ self.verify_cflow_data_detail(ipfix_decoder, capture, cflow,
+ {2: 'packets', 7: 'sport', 11: 'dport'},
+ ip_ver='v6')
+
+ # expect one template and one cflow packet
+ self.collector.get_capture(2)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0003")
+
+ def test_0001(self):
+ """ no timers, one CFLOW packet, 9 Flows inside"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream(packets=9)
+ capture = self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ cflow = self.wait_for_cflow_packet(self.collector, templates[1])
+ self.verify_cflow_data_notimer(ipfix_decoder, capture, [cflow])
+ self.collector.get_capture(4)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+ def test_0002(self):
+ """ no timers, two CFLOW packets (mtu=256), 3 Flows in each"""
+ self.logger.info("FFP_TEST_START_0002")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self, mtu=256)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ self.vapi.ipfix_flush()
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream(packets=6)
+ capture = self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ cflows = []
+ self.vapi.ipfix_flush()
+ cflows.append(self.wait_for_cflow_packet(self.collector,
+ templates[1]))
+ cflows.append(self.wait_for_cflow_packet(self.collector,
+ templates[1]))
+ self.verify_cflow_data_notimer(ipfix_decoder, capture, cflows)
+ self.collector.get_capture(5)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0002")
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class DisableIPFIX(MethodHolder):
+ """Disable IPFIX"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(DisableIPFIX, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(DisableIPFIX, cls).tearDownClass()
+
+ def test_0001(self):
+ """ disable IPFIX after first packets"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream()
+ self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1])
+ self.collector.get_capture(4)
+
+ # disable IPFIX
+ ipfix.disable_exporter()
+ self.pg_enable_capture([self.collector])
+
+ self.send_packets()
+
+ # make sure no cflow packet arrives within the timeout
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1],
+ expected=False)
+ self.collector.get_capture(0)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class ReenableIPFIX(MethodHolder):
+ """Re-enable IPFIX"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(ReenableIPFIX, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ReenableIPFIX, cls).tearDownClass()
+
+ def test_0011(self):
+ """ disable IPFIX after first packets and re-enable after few packets
+ """
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream(packets=5)
+ self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1])
+ self.collector.get_capture(4)
+
+ # disable IPFIX
+ ipfix.disable_exporter()
+ self.vapi.ipfix_flush()
+ self.pg_enable_capture([self.collector])
+
+ self.send_packets()
+
+ # make sure no cflow packet arrived within the active timer span
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1],
+ expected=False)
+ self.collector.get_capture(0)
+ self.pg2.get_capture(5)
+
+ # enable IPFIX
+ ipfix.enable_exporter()
+
+ capture = self.collector.get_capture(4)
+ nr_templates = 0
+ nr_data = 0
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ if p.haslayer(Template):
+ nr_templates += 1
+ self.assertEqual(nr_templates, 3)
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ if p.haslayer(Data):
+ nr_data += 1
+ self.assertEqual(nr_data, 1)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class DisableFP(MethodHolder):
+ """Disable Flowprobe feature"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(DisableFP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(DisableFP, cls).tearDownClass()
+
+ def test_0001(self):
+ """ disable flowprobe feature after first packets"""
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+ ipfix = VppCFLOW(test=self)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ templates = ipfix.verify_templates(ipfix_decoder)
+
+ self.create_stream()
+ self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1])
+ self.collector.get_capture(4)
+
+ # disable IPFIX
+ ipfix.disable_flowprobe_feature()
+ self.pg_enable_capture([self.collector])
+
+ self.send_packets()
+
+ # make sure no cflow packet arrived within the active timer span
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1],
+ expected=False)
+ self.collector.get_capture(0)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class ReenableFP(MethodHolder):
+ """Re-enable Flowprobe feature"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(ReenableFP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(ReenableFP, cls).tearDownClass()
+
+ def test_0001(self):
+ """ disable flowprobe feature after first packets and re-enable
+ after few packets """
+ self.logger.info("FFP_TEST_START_0001")
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pkts = []
+
+ ipfix = VppCFLOW(test=self)
+ ipfix.add_vpp_config()
+
+ ipfix_decoder = IPFIXDecoder()
+ # template packet should arrive immediately
+ self.vapi.ipfix_flush()
+ templates = ipfix.verify_templates(ipfix_decoder, timeout=3)
+
+ self.create_stream()
+ self.send_packets()
+
+ # make sure the one packet we expect actually showed up
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1], 5)
+ self.collector.get_capture(4)
+
+ # disable FPP feature
+ ipfix.disable_flowprobe_feature()
+ self.pg_enable_capture([self.collector])
+
+ self.send_packets()
+
+ # make sure no cflow packet arrived within the active timer span
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1], 5,
+ expected=False)
+ self.collector.get_capture(0)
+
+ # enable FPP feature
+ ipfix.enable_flowprobe_feature()
+ self.vapi.ipfix_flush()
+ templates = ipfix.verify_templates(ipfix_decoder, timeout=3)
+
+ self.send_packets()
+
+ # make sure the next packets (templates and data) we expect actually
+ # showed up
+ self.vapi.ipfix_flush()
+ self.wait_for_cflow_packet(self.collector, templates[1], 5)
+ self.collector.get_capture(4)
+
+ ipfix.remove_vpp_config()
+ self.logger.info("FFP_TEST_FINISH_0001")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_gbp.py b/test/test_gbp.py
new file mode 100644
index 00000000000..21d0770cf66
--- /dev/null
+++ b/test/test_gbp.py
@@ -0,0 +1,5926 @@
+#!/usr/bin/env python3
+import typing
+from socket import AF_INET6, inet_pton, inet_ntop
+import unittest
+from ipaddress import ip_address, IPv4Network, IPv6Network
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, ARP, Dot1Q
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import (
+ IPv6,
+ ICMPv6ND_NS,
+ ICMPv6NDOptSrcLLAddr,
+ ICMPv6ND_NA,
+ ICMPv6EchoRequest,
+)
+from scapy.utils6 import in6_getnsma, in6_getnsmac
+from scapy.layers.vxlan import VXLAN
+from scapy.data import ETH_P_IP, ETH_P_IPV6
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+from vpp_object import VppObject
+from vpp_interface import VppInterface
+from vpp_ip_route import (
+ VppIpRoute,
+ VppRoutePath,
+ VppIpTable,
+ VppIpInterfaceAddress,
+ VppIpInterfaceBind,
+ find_route,
+ FibPathProto,
+ FibPathType,
+)
+from vpp_l2 import (
+ VppBridgeDomain,
+ VppBridgeDomainPort,
+ VppBridgeDomainArpEntry,
+ VppL2FibEntry,
+ find_bridge_domain_port,
+ VppL2Vtr,
+)
+from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
+from vpp_ip import DpoProto, get_dpo_proto
+from vpp_papi import VppEnum, MACAddress
+from vpp_vxlan_gbp_tunnel import find_vxlan_gbp_tunnel, INDEX_INVALID, \
+ VppVxlanGbpTunnel
+from vpp_neighbor import VppNeighbor
+from vpp_acl import AclRule, VppAcl
+
+NUM_PKTS = 67
+
+
+def find_gbp_endpoint(test, sw_if_index=None, ip=None, mac=None,
+ tep=None, sclass=None, flags=None):
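+ # dump the configured endpoints and return True if one matches all the
+ # supplied criteria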
+ if ip:
+ vip = ip
+ if mac:
+ vmac = MACAddress(mac)
+
+ eps = test.vapi.gbp_endpoint_dump()
+
+ for ep in eps:
+ if tep:
+ src = tep[0]
+ dst = tep[1]
+ if src != str(ep.endpoint.tun.src) or \
+ dst != str(ep.endpoint.tun.dst):
+ continue
+ if sw_if_index:
+ if ep.endpoint.sw_if_index != sw_if_index:
+ continue
+ if sclass:
+ if ep.endpoint.sclass != sclass:
+ continue
+ if flags:
+ if flags != (flags & ep.endpoint.flags):
+ continue
+ if ip:
+ for eip in ep.endpoint.ips:
+ if vip == str(eip):
+ return True
+ if mac:
+ if vmac == ep.endpoint.mac:
+ return True
+
+ return False
+
+
+def find_gbp_vxlan(test: VppTestCase, vni):
+ ts = test.vapi.gbp_vxlan_tunnel_dump()
+ for t in ts:
+ if t.tunnel.vni == vni:
+ return True
+ return False
+
+
+class VppGbpEndpoint(VppObject):
+ """
+ GBP Endpoint
+ """
+
+ @property
+ def mac(self):
+ return str(self.vmac)
+
+ @property
+ def ip4(self):
+ return self._ip4
+
+ @property
+ def fip4(self):
+ return self._fip4
+
+ @property
+ def ip6(self):
+ return self._ip6
+
+ @property
+ def fip6(self):
+ return self._fip6
+
+ @property
+ def ips(self):
+ return [self.ip4, self.ip6]
+
+ @property
+ def fips(self):
+ return [self.fip4, self.fip6]
+
+ def __init__(self, test, itf, epg, recirc, ip4, fip4, ip6, fip6,
+ flags=0,
+ tun_src="0.0.0.0",
+ tun_dst="0.0.0.0",
+ mac=True):
+ self._test = test
+ self.itf = itf
+ self.handle = None
+ self.epg = epg
+ self.recirc = recirc
+
+ self._ip4 = ip4
+ self._fip4 = fip4
+ self._ip6 = ip6
+ self._fip6 = fip6
+
+ if mac:
+ self.vmac = MACAddress(self.itf.remote_mac)
+ else:
+ self.vmac = MACAddress("00:00:00:00:00:00")
+
+ self.flags = flags
+ self.tun_src = tun_src
+ self.tun_dst = tun_dst
+
+ def encode(self):
+ ips = [self.ip4, self.ip6]
+ return {
+ "sw_if_index": self.itf.sw_if_index,
+ "ips": ips,
+ "n_ips": len(ips),
+ "mac": self.vmac.packed,
+ "sclass": self.epg.sclass,
+ "flags": self.flags,
+ "tun": {
+ "src": self.tun_src,
+ "dst": self.tun_dst,
+ },
+ }
+
+ def add_vpp_config(self):
+ res = self._test.vapi.gbp_endpoint_add(
+ endpoint=self.encode(),
+ )
+ self.handle = res.handle
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_endpoint_del(handle=self.handle)
+
+ def object_id(self):
+ return "gbp-endpoint:[%d==%d:%s:%d]" % (self.handle,
+ self.itf.sw_if_index,
+ self.ip4,
+ self.epg.sclass)
+
+ def query_vpp_config(self):
+ return find_gbp_endpoint(self._test,
+ self.itf.sw_if_index,
+ self.ip4)
+
+
+class VppGbpRecirc(VppObject):
+ """
+ GBP Recirculation Interface
+ """
+
+ def __init__(self, test, epg, recirc, is_ext=False):
+ self._test = test
+ self.recirc = recirc
+ self.epg = epg
+ self.is_ext = is_ext
+
+ def encode(self):
+ return {
+ "is_ext": self.is_ext,
+ "sw_if_index": self.recirc.sw_if_index,
+ "sclass": self.epg.sclass,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 1,
+ recirc=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_recirc_add_del(
+ 0,
+ recirc=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-recirc:[%d]" % (self.recirc.sw_if_index)
+
+ def query_vpp_config(self):
+ rs = self._test.vapi.gbp_recirc_dump()
+ for r in rs:
+ if r.recirc.sw_if_index == self.recirc.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpExtItf(VppObject):
+ """
+ GBP External Interface
+ """
+
+ def __init__(self, test, itf, bd, rd, anon=False):
+ self._test = test
+ self.itf = itf
+ self.bd = bd
+ self.rd = rd
+ self.flags = 1 if anon else 0
+
+ def encode(self):
+ return {
+ "sw_if_index": self.itf.sw_if_index,
+ "bd_id": self.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ "flags": self.flags,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_ext_itf_add_del(
+ 1,
+ ext_itf=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_ext_itf_add_del(
+ 0,
+ ext_itf=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-ext-itf:[%d]%s" % (self.itf.sw_if_index,
+ " [anon]" if self.flags else "")
+
+ def query_vpp_config(self):
+ rs = self._test.vapi.gbp_ext_itf_dump()
+ for r in rs:
+ if r.ext_itf.sw_if_index == self.itf.sw_if_index:
+ return True
+ return False
+
+
+class VppGbpSubnet(VppObject):
+ """
+ GBP Subnet
+ """
+
+ def __init__(self, test, rd, address, address_len,
+ type, sw_if_index=0xffffffff, sclass=0xffff):
+ # TODO: replace hardcoded defaults when vpp_papi supports
+ # defaults in typedefs
+ self._test = test
+ self.rd_id = rd.rd_id
+ a = ip_address(address)
+ if 4 == a.version:
+ self.prefix = IPv4Network("%s/%d" % (address, address_len),
+ strict=False)
+ else:
+ self.prefix = IPv6Network("%s/%d" % (address, address_len),
+ strict=False)
+ self.type = type
+ self.sw_if_index = sw_if_index
+ self.sclass = sclass
+
+ def encode(self):
+ return {
+ "type": self.type,
+ "sw_if_index": self.sw_if_index,
+ "sclass": self.sclass,
+ "prefix": self.prefix,
+ "rd_id": self.rd_id,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ is_add=1,
+ subnet=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_subnet_add_del(
+ is_add=0,
+ subnet=self.encode()
+ )
+
+ def object_id(self):
+ return "gbp-subnet:[%d-%s]" % (self.rd_id, self.prefix)
+
+ def query_vpp_config(self):
+ ss = self._test.vapi.gbp_subnet_dump()
+ for s in ss:
+ if s.subnet.rd_id == self.rd_id and \
+ s.subnet.type == self.type and \
+ s.subnet.prefix == self.prefix:
+ return True
+ return False
+
+
+class VppGbpEndpointRetention(object):
+ def __init__(self, remote_ep_timeout=0xffffffff):
+ self.remote_ep_timeout = remote_ep_timeout
+
+ def encode(self):
+ return {'remote_ep_timeout': self.remote_ep_timeout}
+
+
+class VppGbpEndpointGroup(VppObject):
+ """
+ GBP Endpoint Group
+ """
+
+ def __init__(self, test, vnid, sclass, rd, bd, uplink,
+ bvi, bvi_ip4, bvi_ip6=None,
+ retention=VppGbpEndpointRetention()):
+ self._test = test
+ self.uplink = uplink
+ self.bvi = bvi
+ self.bvi_ip4 = bvi_ip4
+ self.bvi_ip6 = bvi_ip6
+ self.vnid = vnid
+ self.bd = bd # VppGbpBridgeDomain
+ self.rd = rd
+ self.sclass = sclass
+ if 0 == self.sclass:
+ self.sclass = 0xffff
+ self.retention = retention
+
+ def encode(self) -> dict:
+ return {
+ "uplink_sw_if_index": self.uplink.sw_if_index
+ if self.uplink else INDEX_INVALID,
+ "bd_id": self.bd.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ "vnid": self.vnid,
+ "sclass": self.sclass,
+ "retention": self.retention.encode(),
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_add(epg=self.encode())
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_endpoint_group_del(sclass=self.sclass)
+
+ def object_id(self) -> str:
+ return "gbp-endpoint-group:[%d]" % (self.vnid)
+
+ def query_vpp_config(self) -> bool:
+ epgs = self._test.vapi.gbp_endpoint_group_dump()
+ for epg in epgs:
+ if epg.epg.vnid == self.vnid:
+ return True
+ return False
+
+
+class VppGbpBridgeDomain(VppObject):
+ """
+ GBP Bridge Domain
+ """
+
+ def __init__(self, test, bd, rd, bvi,
+ uu_fwd: typing.Optional[VppVxlanGbpTunnel] = None,
+ bm_flood=None, learn=True,
+ uu_drop=False, bm_drop=False,
+ ucast_arp=False):
+ self._test = test
+ self.bvi = bvi
+ self.uu_fwd = uu_fwd
+ self.bm_flood = bm_flood
+ self.bd = bd
+ self.rd = rd
+
+ e = VppEnum.vl_api_gbp_bridge_domain_flags_t
+
+ self.flags = e.GBP_BD_API_FLAG_NONE
+ if not learn:
+ self.flags |= e.GBP_BD_API_FLAG_DO_NOT_LEARN
+ if uu_drop:
+ self.flags |= e.GBP_BD_API_FLAG_UU_FWD_DROP
+ if bm_drop:
+ self.flags |= e.GBP_BD_API_FLAG_MCAST_DROP
+ if ucast_arp:
+ self.flags |= e.GBP_BD_API_FLAG_UCAST_ARP
+
+ def encode(self) -> dict:
+ return {
+ "flags": self.flags,
+ "bvi_sw_if_index": self.bvi.sw_if_index,
+ "uu_fwd_sw_if_index": self.uu_fwd.sw_if_index
+ if self.uu_fwd else INDEX_INVALID,
+ "bm_flood_sw_if_index": self.bm_flood.sw_if_index
+ if self.bm_flood else INDEX_INVALID,
+ "bd_id": self.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_add(
+ bd=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_bridge_domain_del(bd_id=self.bd.bd_id)
+
+ def object_id(self) -> str:
+ return "gbp-bridge-domain:[%d]" % (self.bd.bd_id)
+
+ def query_vpp_config(self) -> bool:
+ bds = self._test.vapi.gbp_bridge_domain_dump()
+ for bd in bds:
+ if bd.bd.bd_id == self.bd.bd_id:
+ return True
+ return False
+
+
+class VppGbpRouteDomain(VppObject):
+ """
+ GBP Route Domain
+ """
+
+ def __init__(self, test, rd_id, scope, t4, t6, ip4_uu=None, ip6_uu=None):
+ self._test = test
+ self.rd_id = rd_id
+ self.scope = scope
+ self.t4 = t4
+ self.t6 = t6
+ self.ip4_uu = ip4_uu
+ self.ip6_uu = ip6_uu
+
+ def encode(self) -> dict:
+ return {
+ "rd_id": self.rd_id,
+ "scope": self.scope,
+ "ip4_table_id": self.t4.table_id,
+ "ip6_table_id": self.t6.table_id,
+ "ip4_uu_sw_if_index": self.ip4_uu.sw_if_index
+ if self.ip4_uu else INDEX_INVALID,
+ "ip6_uu_sw_if_index": self.ip6_uu.sw_if_index
+ if self.ip6_uu else INDEX_INVALID,
+
+ }
+
+ def add_vpp_config(self):
+ self._test.vapi.gbp_route_domain_add(
+ rd=self.encode(),
+ )
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_route_domain_del(rd_id=self.rd_id)
+
+ def object_id(self):
+ return "gbp-route-domain:[%d]" % (self.rd_id)
+
+ def query_vpp_config(self):
+ rds = self._test.vapi.gbp_route_domain_dump()
+ for rd in rds:
+ if rd.rd.rd_id == self.rd_id:
+ return True
+ return False
+
+
+class VppGbpContractNextHop:
+ def __init__(self, mac, bd, ip, rd):
+ self.mac = mac
+ self.ip = ip
+ self.bd = bd
+ self.rd = rd
+
+ def encode(self) -> dict:
+ return {
+ "ip": self.ip,
+ "mac": self.mac.packed,
+ "bd_id": self.bd.bd.bd_id,
+ "rd_id": self.rd.rd_id,
+ }
+
+
+class VppGbpContractRule:
+ def __init__(self, action, hash_mode, nhs=None):
+ self.action = action
+ self.hash_mode = hash_mode
+ self.nhs = [] if nhs is None else nhs
+
+ def encode(self) -> dict:
+ nhs = []
+ for nh in self.nhs:
+ nhs.append(nh.encode())
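+ # the API expects a fixed-size array of 8 next hops; pad the unused slots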
+ while len(nhs) < 8:
+ nhs.append({})
+ return {'action': self.action,
+ 'nh_set': {
+ 'hash_mode': self.hash_mode,
+ 'n_nhs': len(self.nhs),
+ 'nhs': nhs}}
+
+ def __repr__(self):
+ return '<VppGbpContractRule action=%s, hash_mode=%s>' % (
+ self.action, self.hash_mode)
+
+
+class VppGbpContract(VppObject):
+ """
+ GBP Contract
+ """
+
+ def __init__(self, test, scope, sclass, dclass, acl_index,
+ rules: list, allowed_ethertypes: list):
+ self._test = test
+ self.scope = scope
+ self.acl_index = acl_index
+ self.sclass = sclass
+ self.dclass = dclass
+ self.rules = rules
+ self.allowed_ethertypes = allowed_ethertypes
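+ # the API expects a fixed-size list of 16 allowed ethertypes; pad with zeros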
+ while (len(self.allowed_ethertypes) < 16):
+ self.allowed_ethertypes.append(0)
+
+ def encode(self) -> dict:
+ rules = []
+ for r in self.rules:
+ rules.append(r.encode())
+ return {
+ 'acl_index': self.acl_index,
+ 'scope': self.scope,
+ 'sclass': self.sclass,
+ 'dclass': self.dclass,
+ 'n_rules': len(rules),
+ 'rules': rules,
+ 'n_ether_types': len(self.allowed_ethertypes),
+ 'allowed_ethertypes': self.allowed_ethertypes,
+ }
+
+ def add_vpp_config(self):
+ r = self._test.vapi.gbp_contract_add_del(
+ is_add=1,
+ contract=self.encode()
+ )
+
+ self.stats_index = r.stats_index
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_contract_add_del(
+ is_add=0,
+ contract=self.encode(),
+ )
+
+ def object_id(self):
+ return "gbp-contract:[%d:%d:%d:%d]" % (self.scope,
+ self.sclass,
+ self.dclass,
+ self.acl_index)
+
+ def query_vpp_config(self):
+ cs = self._test.vapi.gbp_contract_dump()
+ for c in cs:
+ if c.contract.scope == self.scope \
+ and c.contract.sclass == self.sclass \
+ and c.contract.dclass == self.dclass:
+ return True
+ return False
+
+ def get_drop_stats(self):
+ c = self._test.statistics.get_counter("/net/gbp/contract/drop")
+ return c[0][self.stats_index]
+
+ def get_permit_stats(self):
+ c = self._test.statistics.get_counter("/net/gbp/contract/permit")
+ return c[0][self.stats_index]
+
+
+class VppGbpVxlanTunnel(VppInterface):
+ """
+ GBP VXLAN tunnel
+ """
+
+ def __init__(self, test, vni, bd_rd_id, mode, src):
+ super(VppGbpVxlanTunnel, self).__init__(test)
+ self._test = test
+ self.vni = vni
+ self.bd_rd_id = bd_rd_id
+ self.mode = mode
+ self.src = src
+
+ def encode(self) -> dict:
+ return {
+ "vni": self.vni,
+ "mode": self.mode,
+ "bd_rd_id": self.bd_rd_id,
+ "src": self.src,
+ }
+
+ def add_vpp_config(self):
+ r = self._test.vapi.gbp_vxlan_tunnel_add(
+ tunnel=self.encode(),
+ )
+ self.set_sw_if_index(r.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.gbp_vxlan_tunnel_del(vni=self.vni)
+
+ def object_id(self):
+ return "gbp-vxlan:%d" % (self.sw_if_index)
+
+ def query_vpp_config(self):
+ return find_gbp_vxlan(self._test, self.vni)
+
+
+@tag_fixme_vpp_workers
+class TestGBP(VppTestCase):
+ """ GBP Test Case """
+
+ @property
+ def nat_config_flags(self):
+ return VppEnum.vl_api_nat_config_flags_t
+
+ @property
+ def nat44_config_flags(self):
+ return VppEnum.vl_api_nat44_config_flags_t
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestGBP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestGBP, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestGBP, self).setUp()
+
+ self.create_pg_interfaces(range(9))
+ self.create_loopback_interfaces(8)
+
+ self.router_mac = MACAddress("00:11:22:33:44:55")
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ for i in self.lo_interfaces:
+ i.admin_up()
+
+ self.vlan_100 = VppDot1QSubint(self, self.pg0, 100)
+ self.vlan_100.admin_up()
+ self.vlan_101 = VppDot1QSubint(self, self.pg0, 101)
+ self.vlan_101.admin_up()
+ self.vlan_102 = VppDot1QSubint(self, self.pg0, 102)
+ self.vlan_102.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.admin_down()
+ super(TestGBP, self).tearDown()
+ for i in self.lo_interfaces:
+ i.remove_vpp_config()
+ self.lo_interfaces = []
+ self.vlan_102.remove_vpp_config()
+ self.vlan_101.remove_vpp_config()
+ self.vlan_100.remove_vpp_config()
+
+ def send_and_expect_bridged(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_bridged6(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_routed(self, src, tx, dst, src_mac):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, src_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_routed6(self, src, tx, dst, src_mac):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, src_mac)
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_natted(self, src, tx, dst, src_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].src, src_ip)
+ self.assertEqual(r[IP].dst, tx[0][IP].dst)
+ return rx
+
+ def send_and_expect_natted6(self, src, tx, dst, src_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].src, src_ip)
+ self.assertEqual(r[IPv6].dst, tx[0][IPv6].dst)
+ return rx
+
+ def send_and_expect_unnatted(self, src, tx, dst, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, tx[0][IP].src)
+ return rx
+
+ def send_and_expect_unnatted6(self, src, tx, dst, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[IPv6].dst, dst_ip)
+ self.assertEqual(r[IPv6].src, tx[0][IPv6].src)
+ return rx
+
+ def send_and_expect_double_natted(self, src, tx, dst, src_ip, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, str(self.router_mac))
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IP].dst, dst_ip)
+ self.assertEqual(r[IP].src, src_ip)
+ return rx
+
+ def send_and_expect_double_natted6(self, src, tx, dst, src_ip, dst_ip):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, str(self.router_mac))
+ self.assertEqual(r[Ether].dst, dst.remote_mac)
+ self.assertEqual(r[IPv6].dst, dst_ip)
+ self.assertEqual(r[IPv6].src, src_ip)
+ return rx
+
+ def send_and_expect_no_arp(self, src, tx, dst):
+ self.pg_send(src, tx)
+ dst.get_capture(0, timeout=1)
+ dst.assert_nothing_captured(remark="")
+
+ def send_and_expect_arp(self, src, tx, dst):
+ rx = self.send_and_expect(src, tx, dst)
+
+ for r in rx:
+ self.assertEqual(r[Ether].src, tx[0][Ether].src)
+ self.assertEqual(r[Ether].dst, tx[0][Ether].dst)
+ self.assertEqual(r[ARP].psrc, tx[0][ARP].psrc)
+ self.assertEqual(r[ARP].pdst, tx[0][ARP].pdst)
+ self.assertEqual(r[ARP].hwsrc, tx[0][ARP].hwsrc)
+ self.assertEqual(r[ARP].hwdst, tx[0][ARP].hwdst)
+ return rx
+
+ def test_gbp(self):
+ """ Group Based Policy """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+
+ #
+ # Route Domains
+ #
+ gt4 = VppIpTable(self, 0)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 0, is_ip6=True)
+ gt6.add_vpp_config()
+ nt4 = VppIpTable(self, 20)
+ nt4.add_vpp_config()
+ nt6 = VppIpTable(self, 20, is_ip6=True)
+ nt6.add_vpp_config()
+
+ rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
+ rd20 = VppGbpRouteDomain(self, 20, 420, nt4, nt6, None, None)
+
+ rd0.add_vpp_config()
+ rd20.add_vpp_config()
+
+ #
+ # Bridge Domains
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd2 = VppBridgeDomain(self, 2)
+ bd20 = VppBridgeDomain(self, 20)
+
+ bd1.add_vpp_config()
+ bd2.add_vpp_config()
+ bd20.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
+ gbd20 = VppGbpBridgeDomain(self, bd20, rd20, self.loop2)
+
+ gbd1.add_vpp_config()
+ gbd2.add_vpp_config()
+ gbd20.add_vpp_config()
+
+ #
+ # 3 EPGs, 2 of which share a BD.
+ # 2 NAT EPGs, one for floating-IP subnets, the other for internet
+ #
+ epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
+ self.pg4, self.loop0,
+ "10.0.0.128", "2001:10::128"),
+ VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
+ self.pg5, self.loop0,
+ "10.0.1.128", "2001:10:1::128"),
+ VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
+ self.pg6, self.loop1,
+ "10.0.2.128", "2001:10:2::128"),
+ VppGbpEndpointGroup(self, 333, 1333, rd20, gbd20,
+ self.pg7, self.loop2,
+ "11.0.0.128", "3001::128"),
+ VppGbpEndpointGroup(self, 444, 1444, rd20, gbd20,
+ self.pg8, self.loop2,
+ "11.0.0.129", "3001::129")]
+ recircs = [VppGbpRecirc(self, epgs[0], self.loop3),
+ VppGbpRecirc(self, epgs[1], self.loop4),
+ VppGbpRecirc(self, epgs[2], self.loop5),
+ VppGbpRecirc(self, epgs[3], self.loop6, is_ext=True),
+ VppGbpRecirc(self, epgs[4], self.loop7, is_ext=True)]
+
+ epg_nat = epgs[3]
+ recirc_nat = recircs[3]
+
+ #
+ # 4 end-points, 2 in the same subnet, 3 in the same BD
+ #
+ eps = [VppGbpEndpoint(self, self.pg0,
+ epgs[0], recircs[0],
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1"),
+ VppGbpEndpoint(self, self.pg1,
+ epgs[0], recircs[0],
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2"),
+ VppGbpEndpoint(self, self.pg2,
+ epgs[1], recircs[1],
+ "10.0.1.1", "11.0.0.3",
+ "2001:10:1::1", "3001::3"),
+ VppGbpEndpoint(self, self.pg3,
+ epgs[2], recircs[2],
+ "10.0.2.1", "11.0.0.4",
+ "2001:10:2::1", "3001::4")]
+
+ self.vapi.nat44_ed_plugin_enable_disable(enable=1)
+ self.vapi.nat66_plugin_enable_disable(enable=1)
+
+ #
+ # Config related to each of the EPGs
+ #
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ if epg != epgs[1] and epg != epgs[4]:
+ b4 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t6).add_vpp_config()
+ epg.bvi.set_mac(self.router_mac)
+
+ # The BVIs are NAT inside interfaces
+ flags = self.nat_config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=epg.bvi.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat66_add_del_interface(
+ sw_if_index=epg.bvi.sw_if_index,
+ flags=flags, is_add=1)
+
+ if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip4, 32,
+ bind=b4).add_vpp_config()
+ if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip6, 128,
+ bind=b6).add_vpp_config()
+
+ # EPG uplink interfaces in the RD
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, epg.uplink, epg.rd.t6).add_vpp_config()
+
+ # add the BD ARP termination entry for BVI IP
+ epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip4)
+ epg.bd_arp_ip6 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip6)
+ epg.bd_arp_ip4.add_vpp_config()
+ epg.bd_arp_ip6.add_vpp_config()
+
+ # EPG in VPP
+ epg.add_vpp_config()
+
+ for recirc in recircs:
+ # EPG's ingress recirculation interface maps to its RD
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t4).add_vpp_config()
+ VppIpInterfaceBind(self, recirc.recirc,
+ recirc.epg.rd.t6).add_vpp_config()
+
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=recirc.recirc.sw_if_index, is_add=1)
+ self.vapi.nat66_add_del_interface(
+ sw_if_index=recirc.recirc.sw_if_index, is_add=1)
+
+ recirc.add_vpp_config()
+
+ for recirc in recircs:
+ self.assertTrue(find_bridge_domain_port(self,
+ recirc.epg.bd.bd.bd_id,
+ recirc.recirc.sw_if_index))
+
+ for ep in eps:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ #
+ # routes to the endpoints. We need these since there are no
+ # adj-fibs due to the fact that the BVI address has a /32 and
+ # the subnet is not attached.
+ #
+ for (ip, fip) in zip(ep.ips, ep.fips):
+ # Add static mappings for each EP from the 10/8 to 11/8 network
+ if ip_address(ip).version == 4:
+ flags = self.nat_config_flags.NAT_IS_ADDR_ONLY
+ self.vapi.nat44_add_del_static_mapping(
+ is_add=1,
+ local_ip_address=ip,
+ external_ip_address=fip,
+ external_sw_if_index=0xFFFFFFFF,
+ vrf_id=0,
+ flags=flags)
+ else:
+ self.vapi.nat66_add_del_static_mapping(
+ local_ip_address=ip,
+ external_ip_address=fip,
+ vrf_id=0, is_add=1)
+
+ # VPP EP create ...
+ ep.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+
+ # ... results in a Gratuitous ARP/ND on the EPG's uplink
+ rx = ep.epg.uplink.get_capture(len(ep.ips), timeout=0.2)
+
+ for ii, ip in enumerate(ep.ips):
+ p = rx[ii]
+
+ if ip_address(ip).version == 6:
+ self.assertTrue(p.haslayer(ICMPv6ND_NA))
+ self.assertEqual(p[ICMPv6ND_NA].tgt, ip)
+ else:
+ self.assertTrue(p.haslayer(ARP))
+ self.assertEqual(p[ARP].psrc, ip)
+ self.assertEqual(p[ARP].pdst, ip)
+
+ # add the BD ARP termination entry for floating IP
+ for fip in ep.fips:
+ ba = VppBridgeDomainArpEntry(self, epg_nat.bd.bd, ep.mac,
+ fip)
+ ba.add_vpp_config()
+
+ # floating IPs route via EPG recirc
+ r = VppIpRoute(
+ self, fip, ip_address(fip).max_prefixlen,
+ [VppRoutePath(fip,
+ ep.recirc.recirc.sw_if_index,
+ type=FibPathType.FIB_PATH_TYPE_DVR,
+ proto=get_dpo_proto(fip))],
+ table_id=20)
+ r.add_vpp_config()
+
+            # L2 FIB entries in the NAT EPG BD to bridge the packets from
+            # the outside directly to the internal EPG
+ lf = VppL2FibEntry(self, epg_nat.bd.bd, ep.mac,
+ ep.recirc.recirc, bvi_mac=0)
+ lf.add_vpp_config()
+
+ #
+ # ARP packets for unknown IP are sent to the EPG uplink
+ #
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst="10.0.0.88",
+ psrc="10.0.0.99"))
+
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_arp)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = epgs[0].uplink.get_capture(1)
+
+ #
+ # ARP/ND packets get a response
+ #
+ pkt_arp = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ ARP(op="who-has",
+ hwdst="ff:ff:ff:ff:ff:ff",
+ hwsrc=self.pg0.remote_mac,
+ pdst=epgs[0].bvi_ip4,
+ psrc=eps[0].ip4))
+
+ self.send_and_expect(self.pg0, [pkt_arp], self.pg0)
+
+ nsma = in6_getnsma(inet_pton(AF_INET6, eps[0].ip6))
+ d = inet_ntop(AF_INET6, nsma)
+ pkt_nd = (Ether(dst=in6_getnsmac(nsma),
+ src=self.pg0.remote_mac) /
+ IPv6(dst=d, src=eps[0].ip6) /
+ ICMPv6ND_NS(tgt=epgs[0].bvi_ip6) /
+ ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
+ self.send_and_expect(self.pg0, [pkt_nd], self.pg0)
+
+ #
+ # broadcast packets are flooded
+ #
+ pkt_bcast = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=self.pg0.remote_mac) /
+ IP(src=eps[0].ip4, dst="232.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.vapi.cli("clear trace")
+ self.pg0.add_stream(pkt_bcast)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rxd = eps[1].itf.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+ rxd = epgs[0].uplink.get_capture(1)
+ self.assertEqual(rxd[0][Ether].dst, pkt_bcast[Ether].dst)
+
+ #
+        # packets to non-local L3 destinations are dropped
+ #
+ pkt_intra_epg_220_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_222_ip4 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="10.0.1.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_intra_epg_220_ip4 * NUM_PKTS)
+
+ pkt_inter_epg_222_ip6 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst="2001:10::99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(self.pg0,
+ pkt_inter_epg_222_ip6 * NUM_PKTS)
+
+ #
+ # Add the subnet routes
+ #
+ s41 = VppGbpSubnet(
+ self, rd0, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s42 = VppGbpSubnet(
+ self, rd0, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s43 = VppGbpSubnet(
+ self, rd0, "10.0.2.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s61 = VppGbpSubnet(
+ self, rd0, "2001:10::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s62 = VppGbpSubnet(
+ self, rd0, "2001:10:1::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s63 = VppGbpSubnet(
+ self, rd0, "2001:10:2::1", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_INTERNAL)
+ s41.add_vpp_config()
+ s42.add_vpp_config()
+ s43.add_vpp_config()
+ s61.add_vpp_config()
+ s62.add_vpp_config()
+ s63.add_vpp_config()
+
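+        # with the stitched-internal subnets installed, the routed packets
+        # that were dropped above are now bridged to the EPG uplinks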
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg_220_ip4 * NUM_PKTS,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_222_ip4 * NUM_PKTS,
+ eps[0].epg.uplink)
+ self.send_and_expect_bridged6(eps[0].itf,
+ pkt_inter_epg_222_ip6 * NUM_PKTS,
+ eps[0].epg.uplink)
+
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.2"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint-group"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.logger.info(self.vapi.cli("sh gbp recirc"))
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh int addr"))
+ self.logger.info(self.vapi.cli("sh int feat loop6"))
+ self.logger.info(self.vapi.cli("sh vlib graph ip4-gbp-src-classify"))
+ self.logger.info(self.vapi.cli("sh int feat loop3"))
+ self.logger.info(self.vapi.cli("sh int feat pg0"))
+
+ #
+ # Packet destined to unknown unicast is sent on the epg uplink ...
+ #
+ pkt_intra_epg_220_to_uplink = (Ether(src=self.pg0.remote_mac,
+ dst="00:00:00:33:44:55") /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg_220_to_uplink * NUM_PKTS,
+ eps[0].epg.uplink)
+ # ... and nowhere else
+ self.pg1.get_capture(0, timeout=0.1)
+ self.pg1.assert_nothing_captured(remark="Flood onto other VMS")
+
+ pkt_intra_epg_221_to_uplink = (Ether(src=self.pg2.remote_mac,
+ dst="00:00:00:33:44:66") /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_intra_epg_221_to_uplink * NUM_PKTS,
+ eps[2].epg.uplink)
+
+ #
+ # Packets from the uplink are forwarded in the absence of a contract
+ #
+ pkt_intra_epg_220_from_uplink = (Ether(src="00:00:00:33:44:55",
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst="10.0.0.99") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg4,
+ pkt_intra_epg_220_from_uplink * NUM_PKTS,
+ self.pg0)
+
+ #
+ # in the absence of policy, endpoints in the same EPG
+ # can communicate
+ #
+ pkt_intra_epg = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg * NUM_PKTS,
+ self.pg1)
+
+ #
+        # in the absence of policy, endpoints in different EPGs
+ # cannot communicate
+ #
+ pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[2].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[3].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # A uni-directional contract from EPG 220 -> 221
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
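+        # the contract references the ACL and, in its last argument, the
+        # list of allowed ethertypes; only IPv4 and IPv6 pass, so non-IP
+        # traffic such as ARP is dropped (checked further below)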
+ c1 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS,
+ eps[2].itf)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # contract for the return direction
+ #
+ c2 = VppGbpContract(
+ self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS,
+ eps[2].itf)
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_inter_epg_221_to_220 * NUM_PKTS,
+ eps[0].itf)
+
+ ds = c2.get_drop_stats()
+ self.assertEqual(ds['packets'], 0)
+ ps = c2.get_permit_stats()
+ self.assertEqual(ps['packets'], NUM_PKTS)
+
+ #
+ # the contract does not allow non-IP
+ #
+ pkt_non_ip_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ ARP())
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_non_ip_inter_epg_220_to_221 * 17)
+
+ #
+ # check that inter group is still disabled for the groups
+ # not in the contract.
+ #
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS)
+
+ #
+ # A uni-directional contract from EPG 220 -> 222 'L3 routed'
+ #
+ c3 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh gbp contract"))
+
+ self.send_and_expect_routed(eps[0].itf,
+ pkt_inter_epg_220_to_222 * NUM_PKTS,
+ eps[3].itf,
+ str(self.router_mac))
+ #
+        # remove the contracts; traffic stops in both directions
+ #
+ c2.remove_vpp_config()
+ c1.remove_vpp_config()
+ c3.remove_vpp_config()
+ acl.remove_vpp_config()
+
+ self.send_and_assert_no_replies(eps[2].itf,
+ pkt_inter_epg_221_to_220 * NUM_PKTS)
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_221 * NUM_PKTS)
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_intra_epg * NUM_PKTS,
+ eps[1].itf)
+
+ #
+ # EPs to the outside world
+ #
+
+ # in the EP's RD an external subnet via the NAT EPG's recirc
+ se1 = VppGbpSubnet(
+ self, rd0, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ se2 = VppGbpSubnet(
+ self, rd0, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ se16 = VppGbpSubnet(
+ self, rd0, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=recirc_nat.recirc.sw_if_index,
+ sclass=epg_nat.sclass)
+ # in the NAT RD an external subnet via the NAT EPG's uplink
+ se3 = VppGbpSubnet(
+ self, rd20, "0.0.0.0", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se36 = VppGbpSubnet(
+ self, rd20, "::", 0,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se4 = VppGbpSubnet(
+ self, rd20, "11.0.0.0", 8,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_STITCHED_EXTERNAL,
+ sw_if_index=epg_nat.uplink.sw_if_index,
+ sclass=epg_nat.sclass)
+ se1.add_vpp_config()
+ se2.add_vpp_config()
+ se16.add_vpp_config()
+ se3.add_vpp_config()
+ se36.add_vpp_config()
+ se4.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh ip fib 0.0.0.0/0"))
+ self.logger.info(self.vapi.cli("sh ip fib 11.0.0.1"))
+ self.logger.info(self.vapi.cli("sh ip6 fib ::/0"))
+ self.logger.info(self.vapi.cli("sh ip6 fib %s" %
+ eps[0].fip6))
+
+ #
+ # From an EP to an outside address: IN2OUT
+ #
+ pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # no policy yet
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_global * NUM_PKTS)
+ rule = AclRule(is_permit=1, proto=17, ports=1234)
+ rule2 = AclRule(is_permit=1, proto=17, ports=1234,
+ src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)))
+ acl2 = VppAcl(self, rules=[rule, rule2])
+ acl2.add_vpp_config()
+
+ c4 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[3].sclass, acl2.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ self.send_and_expect_natted(eps[0].itf,
+ pkt_inter_epg_220_to_global * NUM_PKTS,
+ self.pg7,
+ eps[0].fip4)
+
+ pkt_inter_epg_220_to_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst="6001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_natted6(self.pg0,
+ pkt_inter_epg_220_to_global * NUM_PKTS,
+ self.pg7,
+ eps[0].fip6)
+ #
+ # From a global address to an EP: OUT2IN
+ #
+ pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
+ dst=self.pg0.remote_mac) /
+ IP(dst=eps[0].fip4,
+ src="1.1.1.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(
+ self.pg7, pkt_inter_epg_220_from_global * NUM_PKTS)
+
+ c5 = VppGbpContract(
+ self, 400, epgs[3].sclass, epgs[0].sclass, acl2.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c5.add_vpp_config()
+
+ self.send_and_expect_unnatted(self.pg7,
+ pkt_inter_epg_220_from_global * NUM_PKTS,
+ eps[0].itf,
+ eps[0].ip4)
+
+ pkt_inter_epg_220_from_global = (Ether(src=str(self.router_mac),
+ dst=self.pg0.remote_mac) /
+ IPv6(dst=eps[0].fip6,
+ src="6001::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_unnatted6(
+ self.pg7,
+ pkt_inter_epg_220_from_global * NUM_PKTS,
+ eps[0].itf,
+ eps[0].ip6)
+
+ #
+        # From a local VM to another local VM using their respective
+        # public addresses: IN2OUT2IN
+ #
+ pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].fip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_double_natted(eps[0].itf,
+ pkt_intra_epg_220_global * NUM_PKTS,
+ eps[1].itf,
+ eps[0].fip4,
+ eps[1].ip4)
+
+ pkt_intra_epg_220_global = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst=eps[1].fip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_double_natted6(
+ eps[0].itf,
+ pkt_intra_epg_220_global * NUM_PKTS,
+ eps[1].itf,
+ eps[0].fip6,
+ eps[1].ip6)
+
+ #
+ # cleanup
+ #
+ self.vapi.nat44_ed_plugin_enable_disable(enable=0)
+ self.vapi.nat66_plugin_enable_disable(enable=0)
+
+ def wait_for_ep_timeout(self, sw_if_index=None, ip=None, mac=None,
+ tep=None, n_tries=100, s_time=1):
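+        """ Poll for up to n_tries * s_time seconds until a learnt endpoint
+            ages out; fail the test if it is still present after that. """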
+        # only learnt EPs can time out
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ flags = ep_flags.GBP_API_ENDPOINT_FLAG_LEARNT
+ while (n_tries):
+ if not find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
+ flags=flags):
+ return True
+ n_tries = n_tries - 1
+ self.sleep(s_time)
+ self.assertFalse(find_gbp_endpoint(self, sw_if_index, ip, mac, tep=tep,
+ flags=flags))
+ return False
+
+ def test_gbp_learn_l2(self):
+ """ GBP L2 Endpoint Learning """
+
+ drop_no_contract = self.statistics.get_err_counter(
+ '/err/gbp-policy-port/drop-no-contract')
+ allow_intra_class = self.statistics.get_err_counter(
+ '/err/gbp-policy-port/allow-intra-sclass')
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ learnt = [{'mac': '00:00:11:11:11:01',
+ 'ip': '10.0.0.1',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.0.2',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+        # Pg2 hosts the vxlan tunnel; hosts on pg2 act as TEPs
+ # Pg3 hosts the IP4 UU-flood VXLAN tunnel
+ # Pg4 hosts the IP6 UU-flood VXLAN tunnel
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # Add a mcast destination VXLAN-GBP tunnel for B&M traffic
+ #
+ tun_bm = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg4)
+ tun_bm.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ self.pg3, tun_bm)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128", 32)
+ ip_addr.add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+ epg_330 = VppGbpEndpointGroup(self, 330, 113, rd1, gbd1,
+ None, self.loop1,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(4))
+ epg_330.add_vpp_config()
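+        # both EPGs use a 4 second endpoint retention, so endpoints learnt
+        # from these packets age out quickly (see wait_for_ep_timeout)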
+
+ #
+ # The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
+ # learning enabled
+ #
+ vx_tun_l2_1 = VppGbpVxlanTunnel(
+ self, 99, bd1.bd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
+ self.pg2.local_ip4)
+ vx_tun_l2_1.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
+
+ # a packet with an sclass from an unknown EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[0].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=88, flags=0x88) /
+ Ether(src=learnt[0]["mac"], dst=ep.mac) /
+ IP(src=learnt[0]["ip"], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg2, p)
+
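+        # the sclass (gpid 88) matches no configured EPG, hence there can be
+        # no contract and the packet is dropped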
+ self.logger.info(self.vapi.cli("sh error"))
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-port/drop-no-contract',
+ drop_no_contract + 1)
+
+ #
+        # we should not have learnt a new tunnel endpoint, since
+        # the packet's EPG (sclass) is unknown.
+ #
+ self.assertEqual(INDEX_INVALID,
+ find_vxlan_gbp_tunnel(self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[0].ip4,
+ 99))
+
+ # ep is not learnt, because the EPG is unknown
+ self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
+
+ #
+ # Learn new EPs from IP packets
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip']))
+
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-port/allow-intra-sclass',
+ allow_intra_class + 2)
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show ip mfib"))
+
+ #
+ # If we sleep for the threshold time, the learnt endpoints should
+ # age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # Learn new EPs from GARP packets received on the BD's mcast tunnel
+ #
+ for ii, l in enumerate(learnt):
+            # add some junk in the reserved field of the vxlan-header
+            # next to the VNI. The packet should be accepted since
+            # reserved bits are ignored on rx.
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst="239.1.1.1") /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=88, reserved2=0x80, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst="ff:ff:ff:ff:ff:ff") /
+ ARP(op="who-has",
+ psrc=l['ip'], pdst=l['ip'],
+ hwsrc=l['mac'], hwdst="ff:ff:ff:ff:ff:ff"))
+
+ rx = self.send_and_expect(self.pg4, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip']))
+
+ #
+ # wait for the learnt endpoints to age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # Learn new EPs from L2 packets
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ #
+ # the EP is learnt via the learnt TEP
+ # both from its MAC and its IP
+ #
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+
+ #
+ # wait for the learnt endpoints to age out
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+        # repeat. The Don't-Learn bit is set, so the EPs are not learnt
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
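+            # and with the VXLAN-GBP Don't-Learn (D) gpflag set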
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88, gpflags="D") /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # repeat
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+            # set a reserved bit in addition to the G and I bits;
+            # reserved bits should not be checked on rx.
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0xc8) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Static EP replies to dynamics
+ #
+ self.logger.info(self.vapi.cli("sh l2fib bd_id 1"))
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 17, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+
+ #
+ # repeat in the other EPG
+ # there's no contract between 220 and 330, but the A-bit is set
+ # so the packet is cleared for delivery
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # static EP cannot reach the learnt EPs since there is no contract
+        # only test 1 EP as the others could time out
+ #
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=learnt[0]['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ #
+ # refresh the entries after the check for no replies above
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88, gpflags='A') /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+
+ #
+ # Add the contract so they can talk
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 401, epg_220.sclass, epg_330.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, [p], self.pg2)
+
+ #
+ # send UU packets from the local EP
+ #
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ self.logger.info(self.vapi.cli("sh bridge-domain 1 detail"))
+ p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(ep.itf, [p_uu], gbd1.uu_fwd)
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+
+ p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect_only(ep.itf, [p_bm], tun_bm.mcast_itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg4.local_ip4)
+ self.assertEqual(rx[IP].dst, "239.1.1.1")
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 88)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule, rule2])
+ acl.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 401, epg_330.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+ #
+ # Check v6 Endpoints learning
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ ip=l['ip6'],
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4]))
+
+ self.logger.info(self.vapi.cli("sh int"))
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("sh gbp vxlan"))
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ self.logger.info(self.vapi.cli("sh gbp interface"))
+
+ #
+ # EP moves to a different TEP
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=113, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ sclass=113,
+ mac=l['mac'],
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ #
+ # v6 remote EP reachability
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ self.assertEqual(rx[IPv6].dst, l['ip6'])
+
+ #
+ # EP changes sclass
+ #
+ for l in learnt:
+ # a packet with an sclass from a known EPG
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=112, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, p * 1, self.pg0)
+ rx = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(
+ self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac'],
+ sclass=112,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ #
+ # check reachability and contract intra-epg
+ #
+ allow_intra_class = self.statistics.get_err_counter(
+ '/err/gbp-policy-mac/allow-intra-sclass')
+
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ self.assertEqual(rx[VXLAN].gpid, 112)
+ self.assertEqual(rx[VXLAN].vni, 99)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ self.assertEqual(rx[IPv6].dst, l['ip6'])
+
+ allow_intra_class += NUM_PKTS
+
+ self.assert_error_counter_equal(
+ '/err/gbp-policy-mac/allow-intra-sclass',
+ allow_intra_class)
+
+ #
+ # clean up
+ #
+ for l in learnt:
+ self.wait_for_ep_timeout(vx_tun_l2_1.sw_if_index,
+ mac=l['mac'])
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_contract(self):
+ """ GBP Contracts """
+
+ #
+ # Route Domains
+ #
+ gt4 = VppIpTable(self, 0)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 0, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd0 = VppGbpRouteDomain(self, 0, 400, gt4, gt6, None, None)
+
+ rd0.add_vpp_config()
+
+ #
+ # Bridge Domains
+ #
+ bd1 = VppBridgeDomain(self, 1, arp_term=0)
+ bd2 = VppBridgeDomain(self, 2, arp_term=0)
+
+ bd1.add_vpp_config()
+ bd2.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd0, self.loop0)
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd0, self.loop1)
+
+ gbd1.add_vpp_config()
+ gbd2.add_vpp_config()
+
+ #
+ # 3 EPGs, 2 of which share a BD.
+ #
+ epgs = [VppGbpEndpointGroup(self, 220, 1220, rd0, gbd1,
+ None, self.loop0,
+ "10.0.0.128", "2001:10::128"),
+ VppGbpEndpointGroup(self, 221, 1221, rd0, gbd1,
+ None, self.loop0,
+ "10.0.1.128", "2001:10:1::128"),
+ VppGbpEndpointGroup(self, 222, 1222, rd0, gbd2,
+ None, self.loop1,
+ "10.0.2.128", "2001:10:2::128")]
+ #
+ # 4 end-points, 2 in the same subnet, 3 in the same BD
+ #
+ eps = [VppGbpEndpoint(self, self.pg0,
+ epgs[0], None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1"),
+ VppGbpEndpoint(self, self.pg1,
+ epgs[0], None,
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2"),
+ VppGbpEndpoint(self, self.pg2,
+ epgs[1], None,
+ "10.0.1.1", "11.0.0.3",
+ "2001:10:1::1", "3001::3"),
+ VppGbpEndpoint(self, self.pg3,
+ epgs[2], None,
+ "10.0.2.1", "11.0.0.4",
+ "2001:10:2::1", "3001::4")]
+
+ #
+ # Config related to each of the EPGs
+ #
+ for epg in epgs:
+ # IP config on the BVI interfaces
+ if epg != epgs[1]:
+ b4 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, epg.bvi,
+ epg.rd.t6).add_vpp_config()
+ epg.bvi.set_mac(self.router_mac)
+
+ if_ip4 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip4, 32,
+ bind=b4).add_vpp_config()
+ if_ip6 = VppIpInterfaceAddress(self, epg.bvi,
+ epg.bvi_ip6, 128,
+ bind=b6).add_vpp_config()
+
+ # add the BD ARP termination entry for BVI IP
+ epg.bd_arp_ip4 = VppBridgeDomainArpEntry(self, epg.bd.bd,
+ str(self.router_mac),
+ epg.bvi_ip4)
+ epg.bd_arp_ip4.add_vpp_config()
+
+ # EPG in VPP
+ epg.add_vpp_config()
+
+ #
+ # config ep
+ #
+ for ep in eps:
+ ep.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show interface"))
+ self.logger.info(self.vapi.cli("show br"))
+
+ #
+ # Intra epg allowed without contract
+ #
+ pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[1].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged(self.pg0,
+ pkt_intra_epg_220_to_220 * 65,
+ self.pg1)
+
+ pkt_intra_epg_220_to_220 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg1.remote_mac) /
+ IPv6(src=eps[0].ip6,
+ dst=eps[1].ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect_bridged6(self.pg0,
+ pkt_intra_epg_220_to_220 * 65,
+ self.pg1)
+
+ #
+ # Inter epg denied without contract
+ #
+ pkt_inter_epg_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg2.remote_mac) /
+ IP(src=eps[0].ip4,
+ dst=eps[2].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, pkt_inter_epg_220_to_221)
+
+ #
+ # A uni-directional contract from EPG 220 -> 221
+ #
+ rule = AclRule(is_permit=1, proto=17)
+ rule2 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ rule3 = AclRule(is_permit=1, proto=1)
+ acl = VppAcl(self, rules=[rule, rule2, rule3])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[1].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * 65,
+ eps[2].itf)
+
+ pkt_inter_epg_220_to_222 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=eps[3].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(eps[0].itf,
+ pkt_inter_epg_220_to_222 * 65)
+
+ #
+ # ping router IP in different BD
+ #
+ pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[0].ip4,
+ dst=epgs[1].bvi_ip4) /
+ ICMP(type='echo-request'))
+
+ self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
+
+ pkt_router_ping_220_to_221 = (Ether(src=self.pg0.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[0].ip6,
+ dst=epgs[1].bvi_ip6) /
+ ICMPv6EchoRequest())
+
+ self.send_and_expect(self.pg0, [pkt_router_ping_220_to_221], self.pg0)
+
+ #
+ # contract for the return direction
+ #
+ c2 = VppGbpContract(
+ self, 400, epgs[1].sclass, epgs[0].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ self.send_and_expect_bridged(eps[0].itf,
+ pkt_inter_epg_220_to_221 * 65,
+ eps[2].itf)
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg0.remote_mac) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_bridged(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf)
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=str(self.router_mac)) /
+ IP(src=eps[2].ip4,
+ dst=eps[0].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_routed(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf,
+ str(self.router_mac))
+ pkt_inter_epg_221_to_220 = (Ether(src=self.pg2.remote_mac,
+ dst=str(self.router_mac)) /
+ IPv6(src=eps[2].ip6,
+ dst=eps[0].ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_routed6(eps[2].itf,
+ pkt_inter_epg_221_to_220 * 65,
+ eps[0].itf,
+ str(self.router_mac))
+
+ #
+        # uni-directional contract between 220 and 222
+ #
+ c3 = VppGbpContract(
+ self, 400, epgs[0].sclass, epgs[2].sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ self.send_and_expect(eps[0].itf,
+ pkt_inter_epg_220_to_222 * 65,
+ eps[3].itf)
+
+ c3.remove_vpp_config()
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+ acl.remove_vpp_config()
+
+ def test_gbp_bd_drop_flags(self):
+ """ GBP BD drop flags """
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI only
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ None, None,
+ uu_drop=True, bm_drop=True)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(3))
+ epg_220.add_vpp_config()
+
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+        # send UU/BM packets from the local EP with UU drop and BM drop
+        # enabled in the BD
+ #
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ p_uu = (Ether(src=ep.mac, dst="00:11:11:11:11:11") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(ep.itf, [p_uu])
+
+ p_bm = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ IP(dst="10.0.0.133", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_assert_no_replies(ep.itf, [p_bm])
+
+ self.pg3.unconfig_ip4()
+
+ self.logger.info(self.vapi.cli("sh int"))
+
+ def test_gbp_bd_arp_flags(self):
+ """ GBP BD arp flags """
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+ # Pg4 hosts the IP6 UU-flood VXLAN tunnel
+ #
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # Add a mcast destination VXLAN-GBP tunnel for B&M traffic
+ #
+ tun_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg4)
+ tun_uu.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0,
+ tun_uu, None,
+ ucast_arp=True)
+ gbd1.add_vpp_config()
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 112, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(2))
+ epg_220.add_vpp_config()
+
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+        # send an ARP packet from the local EP; expect it on the uu interface
+ #
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ p_arp = (Ether(src=ep.mac, dst="ff:ff:ff:ff:ff:ff") /
+ ARP(op="who-has",
+ psrc=ep.ip4, pdst="10.0.0.99",
+ hwsrc=ep.mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ self.send_and_expect(ep.itf, [p_arp], self.pg4)
+
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_learn_vlan_l2(self):
+ """ GBP L2 Endpoint w/ VLANs"""
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ learnt = [{'mac': '00:00:11:11:11:01',
+ 'ip': '10.0.0.1',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.0.2',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ gt4 = VppIpTable(self, 1)
+ gt4.add_vpp_config()
+ gt6 = VppIpTable(self, 1, is_ip6=True)
+ gt6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 1, 401, gt4, gt6)
+ rd1.add_vpp_config()
+
+ #
+        # Pg2 hosts the vxlan tunnel; hosts on pg2 act as TEPs
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+
+ #
+ # The EP will be on a vlan sub-interface
+ #
+ vlan_11 = VppDot1QSubint(self, self.pg0, 11)
+ vlan_11.admin_up()
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=vlan_11.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=11)
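+        # VTR pop-1 / push-dot1q 11: the VLAN tag is popped on rx so the
+        # bridge sees untagged frames, and pushed back on tx towards the EP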
+
+ bd_uu_fwd = VppVxlanGbpTunnel(self, self.pg3.local_ip4,
+ self.pg3.remote_ip4, 116)
+ bd_uu_fwd.add_vpp_config()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+        # The BD is marked as do-not-learn, so no endpoints are ever
+ # learnt in this BD.
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, bd_uu_fwd,
+ learn=False)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+
+ # ... and has a /32 applied
+ ip_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32).add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is a bridge-port and has L2 endpoint
+ # learning enabled
+ #
+ vx_tun_l2_1 = VppGbpVxlanTunnel(
+ self, 99, bd1.bd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L2,
+ self.pg2.local_ip4)
+ vx_tun_l2_1.add_vpp_config()
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, vlan_11,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ self.assertTrue(find_route(self, ep.ip4, 32, table_id=1))
+
+ #
+ # Send to the static EP
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=99, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst=ep.mac) /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ #
+ # packet to EP has the EP's vlan tag
+ #
+ for rx in rxs:
+ self.assertEqual(rx[Dot1Q].vlan, 11)
+
+ #
+            # the EP is not learnt since the BD setting prevents it;
+            # nor is a TEP created
+ #
+ self.assertFalse(find_gbp_endpoint(self,
+ vx_tun_l2_1.sw_if_index,
+ mac=l['mac']))
+ self.assertEqual(INDEX_INVALID,
+ find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ 99))
+
+ self.assertEqual(len(self.vapi.gbp_endpoint_dump()), 1)
+
+ #
+ # static to remotes
+        # we didn't learn the remotes, so they are sent to the UU-fwd
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=l['mac']) /
+ Dot1Q(vlan=11) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 17, self.pg3)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg3.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+
+ def test_gbp_learn_l3(self):
+ """ GBP L3 Endpoint Learning """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ tun_ip4_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 114)
+ tun_ip6_uu = VppVxlanGbpTunnel(self, self.pg4.local_ip4,
+ self.pg4.remote_ip4, 116)
+ tun_ip4_uu.add_vpp_config()
+ tun_ip6_uu.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 401, t4, t6, tun_ip4_uu, tun_ip6_uu)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg2 hosts the vxlan tunnel
+        # hosts on pg2 act as TEPs
+ # pg3 is BD uu-fwd
+ # pg4 is RD uu-fwd
+ #
+ self.pg2.config_ip4()
+ self.pg2.resolve_arp()
+ self.pg2.generate_remote_hosts(4)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ #
+ # a GBP bridge domain with a BVI and a UU-flood interface
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, self.pg3)
+ gbd1.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh bridge 1 detail"))
+ self.logger.info(self.vapi.cli("sh gbp bridge"))
+ self.logger.info(self.vapi.cli("sh gbp route"))
+
+ # ... and has a /32 and /128 applied
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b6).add_vpp_config()
+
+ #
+ # The Endpoint-group in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 441, rd1, gbd1,
+ None, self.loop0,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ #
+ # The VXLAN GBP tunnel is in L3 mode with learning enabled
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 101, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
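+        # in L3 mode the GBP-VXLAN tunnel is bound to the route-domain
+        # (rd1) rather than to a bridge-domain as in the L2 tests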
+
+ #
+ # A static endpoint that the learnt endpoints are trying to
+ # talk to
+ #
+ ep = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.127", "11.0.0.127",
+ "2001:10::1", "3001::1")
+ ep.add_vpp_config()
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Static IPv4 EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=l['ip'], src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4)
+ self.assertEqual(inner[IP].dst, l['ip'])
+
+ for l in learnt:
+ self.assertFalse(find_gbp_endpoint(self,
+ tep1_sw_if_index,
+ ip=l['ip']))
+
+ #
+ # learn some remote IPv6 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IPv6(src=l['ip6'], dst=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ self.logger.info(self.vapi.cli("show gbp bridge"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("show gbp vxlan"))
+ self.logger.info(self.vapi.cli("show int addr"))
+
+ # endpoint learnt via the TEP
+ self.assertTrue(find_gbp_endpoint(self, ip=l['ip6']))
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+ self.logger.info(self.vapi.cli("show ip fib index 1 %s" % l['ip']))
+
+ #
+ # Static EP replies to learnt
+ #
+ for l in learnt:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IPv6(dst=l['ip6'], src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IPv6].src, ep.ip6)
+ self.assertEqual(inner[IPv6].dst, l['ip6'])
+
+ self.logger.info(self.vapi.cli("sh gbp endpoint"))
+ for l in learnt:
+ self.wait_for_ep_timeout(ip=l['ip'])
+
+ #
+ # Static sends to unknown EP with no route
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ #
+ # Add a route to static EP's v4 and v6 subnet
+ #
+ se_10_24 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_24.add_vpp_config()
+
+ #
+ # static pings router
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=epg_220.bvi_ip4, src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IPv6(dst=epg_220.bvi_ip6, src=ep.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ #
+        # packets to an address in the subnet are sent on the uu-fwd
+ #
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.0.99", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg4.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg4.remote_ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 114)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # policy is not applied to packets sent to the uu-fwd interfaces
+ self.assertFalse(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ #
+ # learn some remote IPv4 EPs
+ #
+ for ii, l in enumerate(learnt):
+ # a packet with an sclass from a known EPG
+ # arriving on an unknown TEP
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=l['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # the new TEP
+ tep1_sw_if_index = find_vxlan_gbp_tunnel(
+ self,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4,
+ vx_tun_l3.vni)
+ self.assertNotEqual(INDEX_INVALID, tep1_sw_if_index)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # Add a remote endpoint from the API
+ #
+ rep_88 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.88", "11.0.0.88",
+ "2001:10::88", "3001::88",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4,
+ mac=None)
+ rep_88.add_vpp_config()
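+        # an endpoint added with the REMOTE flag names its TEP pair, so
+        # traffic towards it should be VXLAN-GBP encapped to that TEP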
+
+ #
+ # Add a remote endpoint from the API that matches an existing one
+        # this is a lower priority, hence the packet is sent to the DP-learnt
+        # TEP
+ #
+ rep_2 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ learnt[0]['ip'], "11.0.0.101",
+ learnt[0]['ip6'], "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_2.add_vpp_config()
+
+ #
+ # Add a route to the learned EP's v4 subnet
+        # packets should be sent on the v4/v6 uu-fwd interface respectively
+ #
+ se_10_1_24 = VppGbpSubnet(
+ self, rd1, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT)
+ se_10_1_24.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("show gbp endpoint"))
+
+ ips = ["10.0.0.88", learnt[0]['ip']]
+ for ip in ips:
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst=ip, src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+ self.assertEqual(rx[UDP].dport, 48879)
+ # the UDP source port is a random value for hashing
+ self.assertEqual(rx[VXLAN].gpid, 441)
+ self.assertEqual(rx[VXLAN].vni, 101)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep.ip4)
+ self.assertEqual(inner[IP].dst, ip)
+
+ #
+        # remove the API remote EPs; only the API-sourced one is gone, the
+        # DP-learnt one remains
+ #
+ rep_88.remove_vpp_config()
+ rep_2.remove_vpp_config()
+
+ self.assertTrue(find_gbp_endpoint(self, ip=rep_2.ip4))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(src=ep.ip4, dst=rep_2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, [p], self.pg2)
+
+ self.assertFalse(find_gbp_endpoint(self, ip=rep_88.ip4))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(src=ep.ip4, dst=rep_88.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, [p], self.pg4)
+
+ #
+        # the testcase cannot end while the DP-learnt EP is still present,
+        # so wait until it times out and is removed
+ #
+ self.wait_for_ep_timeout(ip=rep_88.ip4)
+ self.wait_for_ep_timeout(ip=rep_2.ip4)
+
+ #
+        # Same as above, learn a remote EP via CP and DP;
+        # this time remove the DP one first and expect the CP data to remain
+ #
+ rep_3 = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.1.4", "11.0.0.103",
+ "2001::10:3", "3001::103",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg2.local_ip4,
+ self.pg2.remote_hosts[1].ip4,
+ mac=None)
+ rep_3.add_vpp_config()
+
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[2].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src="10.0.1.4", dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg2, p * NUM_PKTS, self.pg0)
+
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=rep_3.ip4,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4]))
+
+ p = (Ether(src=ep.mac, dst=self.loop0.local_mac) /
+ IP(dst="10.0.1.4", src=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ # host 2 is the DP learned TEP
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[2].ip4)
+
+ self.wait_for_ep_timeout(ip=rep_3.ip4,
+ tep=[self.pg2.local_ip4,
+ self.pg2.remote_hosts[2].ip4])
+
+ rxs = self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg2)
+
+ # host 1 is the CP learned TEP
+ for rx in rxs:
+ self.assertEqual(rx[IP].src, self.pg2.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_hosts[1].ip4)
+
+ #
+ # shutdown with learnt endpoint present
+ #
+ p = (Ether(src=self.pg2.remote_mac,
+ dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_hosts[1].ip4,
+ dst=self.pg2.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=101, gpid=441, flags=0x88) /
+ Ether(src=l['mac'], dst="00:00:00:11:11:11") /
+ IP(src=learnt[1]['ip'], dst=ep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rx = self.send_and_expect(self.pg2, [p], self.pg0)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip=l['ip']))
+
+ #
+ # TODO
+ # remote endpoint becomes local
+ #
+ self.pg2.unconfig_ip4()
+ self.pg3.unconfig_ip4()
+ self.pg4.unconfig_ip4()
+
+ def test_gbp_redirect(self):
+ """ GBP Endpoint Redirect """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts a BD's UU-fwd
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+        # GBP bridge domains for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
+ gbd1.add_vpp_config()
+
+ bd2 = VppBridgeDomain(self, 2)
+ bd2.add_vpp_config()
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
+ gbd2.add_vpp_config()
+
+ # ... and has a /32 and /128 applied
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b_ip4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b_ip6).add_vpp_config()
+ ip4_addr = VppIpInterfaceAddress(self, gbd2.bvi,
+ "10.0.1.128", 32).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd2.bvi,
+ "2001:11::128", 128).add_vpp_config()
+
+ #
+ # The Endpoint-groups in which we are learning endpoints
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_220.add_vpp_config()
+ epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
+ None, gbd2.bvi,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_221.add_vpp_config()
+ epg_222 = VppGbpEndpointGroup(self, 222, 442, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.2.128",
+ "2001:12::128",
+ VppGbpEndpointRetention(60))
+ epg_222.add_vpp_config()
+
+ #
+        # GBP bridge domains for the SEPs
+ #
+ bd_uu1 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 116)
+ bd_uu1.add_vpp_config()
+ bd_uu2 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 117)
+ bd_uu2.add_vpp_config()
+
+ bd3 = VppBridgeDomain(self, 3)
+ bd3.add_vpp_config()
+ gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
+ bd_uu1, learn=False)
+ gbd3.add_vpp_config()
+ bd4 = VppBridgeDomain(self, 4)
+ bd4.add_vpp_config()
+ gbd4 = VppGbpBridgeDomain(self, bd4, rd1, self.loop3,
+ bd_uu2, learn=False)
+ gbd4.add_vpp_config()
+
+ #
+ # EPGs in which the service endpoints exist
+ #
+ epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
+ None, gbd1.bvi,
+ "12.0.0.128",
+ "4001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_320.add_vpp_config()
+ epg_321 = VppGbpEndpointGroup(self, 321, 551, rd1, gbd4,
+ None, gbd2.bvi,
+ "12.0.1.128",
+ "4001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_321.add_vpp_config()
+
+ #
+ # three local endpoints
+ #
+ ep1 = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001:10::1")
+ ep1.add_vpp_config()
+ ep2 = VppGbpEndpoint(self, self.pg1,
+ epg_221, None,
+ "10.0.1.1", "11.0.1.1",
+ "2001:11::1", "3001:11::1")
+ ep2.add_vpp_config()
+ ep3 = VppGbpEndpoint(self, self.pg2,
+ epg_222, None,
+ "10.0.2.2", "11.0.2.2",
+ "2001:12::1", "3001:12::1")
+ ep3.add_vpp_config()
+
+ #
+ # service endpoints
+ #
+ sep1 = VppGbpEndpoint(self, self.pg3,
+ epg_320, None,
+ "12.0.0.1", "13.0.0.1",
+ "4001:10::1", "5001:10::1")
+ sep1.add_vpp_config()
+ sep2 = VppGbpEndpoint(self, self.pg4,
+ epg_320, None,
+ "12.0.0.2", "13.0.0.2",
+ "4001:10::2", "5001:10::2")
+ sep2.add_vpp_config()
+ sep3 = VppGbpEndpoint(self, self.pg5,
+ epg_321, None,
+ "12.0.1.1", "13.0.1.1",
+ "4001:11::1", "5001:11::1")
+ sep3.add_vpp_config()
+ # this EP is not installed immediately
+ sep4 = VppGbpEndpoint(self, self.pg6,
+ epg_321, None,
+ "12.0.1.2", "13.0.1.2",
+ "4001:11::2", "5001:11::2")
+
+ #
+ # an L2 switch packet between local EPs in different EPGs
+        # different dest ports on each so they are LB hashed differently
+ #
+ p4 = [(Ether(src=ep1.mac, dst=ep3.mac) /
+ IP(src=ep1.ip4, dst=ep3.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep3.mac, dst=ep1.mac) /
+ IP(src=ep3.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=ep3.mac) /
+ IPv6(src=ep1.ip6, dst=ep3.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep3.mac, dst=ep1.mac) /
+ IPv6(src=ep3.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ # should be dropped since no contract yet
+ self.send_and_assert_no_replies(self.pg0, [p4[0]])
+ self.send_and_assert_no_replies(self.pg0, [p6[0]])
+
+ #
+ # Add a contract with a rule to load-balance redirect via SEP1 and SEP2
+ # one of the next-hops is via an EP that is not known
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
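+        # the ACL permits UDP (proto 17) for both address families and is
+        # used as the classifier for the redirect contracts below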
+
+ #
+ # test the src-ip hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
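+        # with a REDIRECT rule the listed next-hops form a load-balance set;
+        # the hash mode selects the flow fields fed to the hash, so src-ip
+        # mode keys the SEP choice off the source address alone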
+
+ #
+        # send again with the contract in place; now packets arrive
+        # at SEP1 or SEP2 depending on the hashing
+ #
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep3.ip4)
+
+ rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep2.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep2.mac)
+ self.assertEqual(rx[IP].src, ep3.ip4)
+ self.assertEqual(rx[IP].dst, ep1.ip4)
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 117)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep4.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep3.ip6)
+
+ rxs = self.send_and_expect(self.pg2, p6[1] * 17, sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, ep3.ip6)
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ #
+ # programme the unknown EP
+ #
+ sep4.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep4.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, ep3.ip6)
+
+ #
+ # and revert back to unprogrammed
+ #
+ sep4.remove_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 117)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep4.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep3.ip6)
+
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+
+ #
+ # test the symmetric hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_222.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
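+        # symmetric mode hashes source and destination together, so both
+        # directions of a flow should land on the same SEP, as checked below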
+
+ #
+        # send again with the contract in place; now packets arrive
+        # at SEP1 for both directions
+ #
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep3.ip4)
+
+ rxs = self.send_and_expect(self.pg2, p4[1] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep3.ip4)
+ self.assertEqual(rx[IP].dst, ep1.ip4)
+
+ #
+ # programme the unknown EP for the L3 tests
+ #
+ sep4.add_vpp_config()
+
+ #
+ # an L3 switch packet between local EPs in different EPGs
+        # different dest ports on each so they are LB hashed differently
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IP(src=ep2.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst=ep2.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep2.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ c3 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ #
+ # learn a remote EP in EPG 221
+        # packets coming from unknown remote EPs will be learnt & redirected
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
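+        # an L3-mode GBP-VXLAN tunnel is the parent interface against which
+        # remote EPs are learnt; per-TEP vxlan-gbp tunnels are created on
+        # demand underneath it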
+
+ c4 = VppGbpContract(
+ self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
+ IP(src="10.0.0.88", dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # unknown remote EP to local EP redirected
+ rxs = self.send_and_expect(self.pg7, [p], sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, "10.0.0.88")
+ self.assertEqual(rx[IP].dst, ep1.ip4)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="10.0.0.88"))
+
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:33", dst=str(self.router_mac)) /
+ IPv6(src="2001:10::88", dst=ep1.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ # unknown remote EP to local EP redirected (ipv6)
+ rxs = self.send_and_expect(self.pg7, [p], sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, "2001:10::88")
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="2001:10::88"))
+
+ #
+ # L3 switch from local to remote EP
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst="10.0.0.88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst="2001:10::88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, "10.0.0.88")
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep4.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep4.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, "2001:10::88")
+
+ #
+ # test the dst-ip hash mode
+ #
+ c5 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd),
+ VppGbpContractNextHop(sep2.vmac, sep2.epg.bd,
+ sep2.ip4, sep2.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep3.vmac, sep3.epg.bd,
+ sep3.ip6, sep3.epg.rd),
+ VppGbpContractNextHop(sep4.vmac, sep4.epg.bd,
+ sep4.ip6, sep4.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c5.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, "10.0.0.88")
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, sep3.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep3.mac)
+ self.assertEqual(rx[IPv6].src, ep1.ip6)
+ self.assertEqual(rx[IPv6].dst, "2001:10::88")
+
+ #
+ # a programmed remote SEP in EPG 320
+ #
+
+ # gbp vxlan tunnel for the remote SEP
+ vx_tun_l3_sep = VppGbpVxlanTunnel(
+ self, 555, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3_sep.add_vpp_config()
+
+ # remote SEP
+ sep5 = VppGbpEndpoint(self, vx_tun_l3_sep,
+ epg_320, None,
+ "12.0.0.10", "13.0.0.10",
+ "4001:10::10", "5001:10::10",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ sep5.add_vpp_config()
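+        # traffic redirected to this remote SEP should leave encapped with
+        # the SEP tunnel's VNI 555 towards pg7's remote TEP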
+
+ #
+ # local l3out redirect tests
+ #
+
+ # add local l3out
+ # the external bd
+ self.loop4.set_mac(self.router_mac)
+ b_lo4_ip4 = VppIpInterfaceBind(self, self.loop4, t4).add_vpp_config()
+ b_lo4_ip6 = VppIpInterfaceBind(self, self.loop4, t6).add_vpp_config()
+ ebd = VppBridgeDomain(self, 100)
+ ebd.add_vpp_config()
+ gebd = VppGbpBridgeDomain(self, ebd, rd1, self.loop4, None, None)
+ gebd.add_vpp_config()
+ # the external epg
+ eepg = VppGbpEndpointGroup(self, 888, 765, rd1, gebd,
+ None, gebd.bvi,
+ "10.1.0.128",
+ "2001:10:1::128",
+ VppGbpEndpointRetention(60))
+ eepg.add_vpp_config()
+ # add subnets to BVI
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "10.1.0.128",
+ 24, bind=b_lo4_ip4).add_vpp_config()
+ VppIpInterfaceAddress(
+ self,
+ gebd.bvi,
+ "2001:10:1::128",
+ 64, bind=b_lo4_ip6).add_vpp_config()
+ # ... which are L3-out subnets
+ VppGbpSubnet(self, rd1, "10.1.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ VppGbpSubnet(self, rd1, "2001:10:1::128", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=765).add_vpp_config()
+ # external endpoints
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep1 = VppGbpEndpoint(self, self.vlan_100, eepg, None, "10.1.0.1",
+ "11.1.0.1", "2001:10:1::1", "3001:10:1::1",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep1.add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ eep2 = VppGbpEndpoint(self, self.vlan_101, eepg, None, "10.1.0.2",
+ "11.1.0.2", "2001:10:1::2", "3001:10:1::2",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep2.add_vpp_config()
+
+        # external subnets reachable through eep1 and eep2 respectively
+ VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath(eep1.ip4, eep1.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.220.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10:220::", 64,
+ [VppRoutePath(eep1.ip6, eep1.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:220::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220).add_vpp_config()
+ VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath(eep2.ip4, eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.221.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
+ VppIpRoute(self, "10:221::", 64,
+ [VppRoutePath(eep2.ip6, eep2.epg.bvi.sw_if_index)],
+ table_id=t6.table_id).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10:221::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221).add_vpp_config()
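+        # an L3_OUT subnet assigns an sclass to traffic sourced from that
+        # prefix, so the external prefixes can be matched by the
+        # (4220 <-> 4221) contract below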
+
+ #
+ # l3out redirect to remote (known, then unknown) SEP
+ #
+
+        # packets from one external subnet to the other
+ p = [(Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.17", dst="10.221.0.65") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=self.router_mac) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::17", dst="10:221::65") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # packets should be dropped in absence of contract
+ self.send_and_assert_no_replies(self.pg0, p)
+
+ # contract redirecting to sep5
+ VppGbpContract(
+ self, 402, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip4, sep5.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip6, sep5.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the programmed remote leaf TEP
+ self.assertEqual(rx[VXLAN].vni, 555)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ # remote SEP: it is now an unknown remote SEP and should go
+ # to spine proxy
+ sep5.remove_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the spine proxy TEP
+ self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[Dot1Q].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # l3out redirect to local SEP
+ #
+
+        # change the contract between the l3out subnets to redirect to a
+        # local SEP instead of the remote SEP
+ VppGbpContract(
+ self, 402, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p, sep1.itf)
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ rxip = rx[Ether].payload
+ txip = tx[Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # redirect remote EP to remote (known then unknown) SEP
+ #
+
+ # remote SEP known again
+ sep5.add_vpp_config()
+
+        # contract to redirect to the (again known) remote SEP
+ VppGbpContract(
+ self, 402, epg_221.sclass, epg_222.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip4, sep5.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_DST_IP,
+ [VppGbpContractNextHop(sep5.vmac, sep5.epg.bd,
+ sep5.ip6, sep5.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6]).add_vpp_config()
+
+        # packets from an unknown EP in EPG 221 to a known EP in EPG 222
+        # should be redirected to the known remote SEP
+ base = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=441, flags=0x88) /
+ Ether(src="00:22:22:22:22:44", dst=str(self.router_mac)))
+ p = [(base /
+ IP(src="10.0.1.100", dst=ep3.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (base /
+ IPv6(src="2001:10::100", dst=ep3.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ # unknown remote EP to local EP redirected to known remote SEP
+ rxs = self.send_and_expect(self.pg7, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the programmed remote leaf TEP
+ self.assertEqual(rx[VXLAN].vni, 555)
+ self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[VXLAN][Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ # endpoint learnt via the parent GBP-vxlan interface
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="10.0.1.100"))
+ self.assertTrue(find_gbp_endpoint(self,
+ vx_tun_l3._sw_if_index,
+ ip="2001:10::100"))
+
+ # remote SEP: it is now an unknown remote SEP and should go
+ # to spine proxy
+ sep5.remove_vpp_config()
+
+        # remote EP (coming from spine proxy) to local EP: the SEP is now
+        # unknown, so the redirect is sent via the spine proxy
+ rxs = self.send_and_expect(self.pg7, p, self.pg7)
+
+ for rx, tx in zip(rxs, p):
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ # this should use the spine proxy TEP
+ self.assertEqual(rx[VXLAN].vni, epg_320.bd.uu_fwd.vni)
+ self.assertEqual(rx[VXLAN].gpid, epg_221.sclass)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+ rxip = rx[VXLAN][Ether].payload
+ txip = tx[VXLAN][Ether].payload
+ self.assertEqual(rxip.src, txip.src)
+ self.assertEqual(rxip.dst, txip.dst)
+
+ #
+ # cleanup
+ #
+ self.pg7.unconfig_ip4()
+
+ def test_gbp_redirect_extended(self):
+ """ GBP Endpoint Redirect Extended """
+
+ self.vapi.cli("set logging class gbp level debug")
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ learnt = [{'mac': '00:00:11:11:11:02',
+ 'ip': '10.0.1.2',
+ 'ip6': '2001:10::2'},
+ {'mac': '00:00:11:11:11:03',
+ 'ip': '10.0.1.3',
+ 'ip6': '2001:10::3'}]
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ # create IPv4 and IPv6 RD UU VxLAN-GBP TEP and bind them to the right
+ # VRF
+ rd_uu4 = VppVxlanGbpTunnel(
+ self,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ 114,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ rd_uu4.add_vpp_config()
+ VppIpInterfaceBind(self, rd_uu4, t4).add_vpp_config()
+
+ rd_uu6 = VppVxlanGbpTunnel(
+ self,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ 115,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ rd_uu6.add_vpp_config()
+ VppIpInterfaceBind(self, rd_uu6, t4).add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 402, t4, t6, rd_uu4, rd_uu6)
+ rd1.add_vpp_config()
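+        # routed traffic to endpoints unknown to the RD should leave via
+        # these UU-fwd tunnels (VNI 114/115); this path is exercised later
+        # when ep2 is temporarily removed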
+
+ self.loop0.set_mac(self.router_mac)
+ self.loop1.set_mac(self.router_mac)
+ self.loop2.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b_lo0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_lo0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+ b_lo1_ip4 = VppIpInterfaceBind(self, self.loop1, t4).add_vpp_config()
+ b_lo1_ip6 = VppIpInterfaceBind(self, self.loop1, t6).add_vpp_config()
+ b_lo2_ip4 = VppIpInterfaceBind(self, self.loop2, t4).add_vpp_config()
+ b_lo2_ip6 = VppIpInterfaceBind(self, self.loop2, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts a BD's UU-fwd
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+        # GBP bridge domains for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0)
+ gbd1.add_vpp_config()
+
+ bd2 = VppBridgeDomain(self, 2)
+ bd2.add_vpp_config()
+ gbd2 = VppGbpBridgeDomain(self, bd2, rd1, self.loop1)
+ gbd2.add_vpp_config()
+
+ # ... and has a /32 and /128 applied
+ ip4_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 32,
+ bind=b_lo0_ip4).add_vpp_config()
+ ip6_addr1 = VppIpInterfaceAddress(self, gbd1.bvi,
+ "2001:10::128", 128,
+ bind=b_lo0_ip6).add_vpp_config()
+ ip4_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
+ "10.0.1.128", 32,
+ bind=b_lo1_ip4).add_vpp_config()
+ ip6_addr2 = VppIpInterfaceAddress(self, gbd2.bvi,
+ "2001:11::128", 128,
+ bind=b_lo1_ip6).add_vpp_config()
+
+ #
+ # The Endpoint-groups
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 440, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_220.add_vpp_config()
+ epg_221 = VppGbpEndpointGroup(self, 221, 441, rd1, gbd2,
+ None, gbd2.bvi,
+ "10.0.1.128",
+ "2001:11::128",
+ VppGbpEndpointRetention(60))
+ epg_221.add_vpp_config()
+
+ #
+        # a GBP bridge domain for the SEPs
+ #
+ bd_uu3 = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 116)
+ bd_uu3.add_vpp_config()
+
+ bd3 = VppBridgeDomain(self, 3)
+ bd3.add_vpp_config()
+ gbd3 = VppGbpBridgeDomain(self, bd3, rd1, self.loop2,
+ bd_uu3, learn=False)
+ gbd3.add_vpp_config()
+
+ ip4_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
+ "12.0.0.128", 32,
+ bind=b_lo2_ip4).add_vpp_config()
+ ip6_addr3 = VppIpInterfaceAddress(self, gbd3.bvi,
+ "4001:10::128", 128,
+ bind=b_lo2_ip6).add_vpp_config()
+
+ #
+ # self.logger.info(self.vapi.cli("show gbp bridge"))
+ # self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ # self.logger.info(self.vapi.cli("show gbp vxlan"))
+ # self.logger.info(self.vapi.cli("show int addr"))
+ #
+
+ #
+ # EPGs in which the service endpoints exist
+ #
+ epg_320 = VppGbpEndpointGroup(self, 320, 550, rd1, gbd3,
+ None, gbd3.bvi,
+ "12.0.0.128",
+ "4001:10::128",
+ VppGbpEndpointRetention(60))
+ epg_320.add_vpp_config()
+
+ #
+ # endpoints
+ #
+ ep1 = VppGbpEndpoint(self, self.pg0,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001:10::1")
+ ep1.add_vpp_config()
+ ep2 = VppGbpEndpoint(self, self.pg1,
+ epg_221, None,
+ "10.0.1.1", "11.0.1.1",
+ "2001:11::1", "3001:11::1")
+ ep2.add_vpp_config()
+
+ #
+ # service endpoints
+ #
+ sep1 = VppGbpEndpoint(self, self.pg3,
+ epg_320, None,
+ "12.0.0.1", "13.0.0.1",
+ "4001:10::1", "5001:10::1")
+ sep2 = VppGbpEndpoint(self, self.pg4,
+ epg_320, None,
+ "12.0.0.2", "13.0.0.2",
+ "4001:10::2", "5001:10::2")
+
+ # sep1 and sep2 are not added to config yet
+ # they are unknown for now
+
+ #
+ # add routes to EPG subnets
+ #
+ VppGbpSubnet(self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
+ ).add_vpp_config()
+ VppGbpSubnet(self, rd1, "10.0.1.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_TRANSPORT
+ ).add_vpp_config()
+
+ #
+ # Local host to known local host in different BD
+ # with SFC contract (source and destination are in
+ # one node and service endpoint in another node)
+ #
+ p4 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IP(src=ep2.ip4, dst=ep1.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ p6 = [(Ether(src=ep1.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep1.ip6, dst=ep2.ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=ep2.mac, dst=str(self.router_mac)) /
+ IPv6(src=ep2.ip6, dst=ep1.ip6) /
+ UDP(sport=1234, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ # should be dropped since no contract yet
+ self.send_and_assert_no_replies(self.pg0, [p4[0]])
+ self.send_and_assert_no_replies(self.pg0, [p6[0]])
+
+ #
+        # Add a contract with a rule to redirect via SEP1;
+        # the next-hop is via an EP that is not yet known
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ #
+        # test the symmetric hash mode
+ #
+ c1 = VppGbpContract(
+ self, 402, epg_220.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ c2 = VppGbpContract(
+ self, 402, epg_221.sclass, epg_220.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip4, sep1.epg.rd)]),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_REDIRECT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC,
+ [VppGbpContractNextHop(sep1.vmac, sep1.epg.bd,
+ sep1.ip6, sep1.epg.rd)])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+
+ # ep1 <--> ep2 redirected through sep1
+ # sep1 is unknown
+        # the packet is redirected to the SEP BD and then goes through the SEP BD UU
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IP].src, ep1.ip4)
+ self.assertEqual(inner[IP].dst, ep2.ip4)
+
+ rxs = self.send_and_expect(self.pg1, p4[1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IP].src, ep2.ip4)
+ self.assertEqual(inner[IP].dst, ep1.ip4)
+
+ rxs = self.send_and_expect(self.pg0, p6[0] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IPv6].src, ep1.ip6)
+ self.assertEqual(inner[IPv6].dst, ep2.ip6)
+
+ rxs = self.send_and_expect(self.pg1, p6[1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 116)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, sep1.mac)
+ self.assertEqual(inner[IPv6].src, ep2.ip6)
+ self.assertEqual(inner[IPv6].dst, ep1.ip6)
+
+ # configure sep1: it is now local
+ # packets between ep1 and ep2 are redirected locally
+ sep1.add_vpp_config()
+
+ rxs = self.send_and_expect(self.pg0, p4[0] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ rxs = self.send_and_expect(self.pg1, p6[1] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, routed_src_mac)
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IPv6].src, ep2.ip6)
+ self.assertEqual(rx[IPv6].dst, ep1.ip6)
+
+ # packet coming from the l2 spine-proxy to sep1
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=116, gpid=440, gpflags=0x08, flags=0x88) /
+ Ether(src=str(self.router_mac), dst=sep1.mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
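+        # this mimics traffic already policed upstream: gpflags=0x08 sets
+        # the A (policy-applied) bit and VNI 116 is the SEP BD's UU tunnel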
+
+ rxs = self.send_and_expect(self.pg7, [p] * 17, sep1.itf)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, sep1.mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ # contract for SEP to communicate with dst EP
+ c3 = VppGbpContract(
+ self, 402, epg_320.sclass, epg_221.sclass, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SYMMETRIC)],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ # temporarily remove ep2, so that ep2 is remote & unknown
+ ep2.remove_vpp_config()
+
+ # packet going back from sep1 to its original dest (ep2)
+ # as ep2 is now unknown (see above), it must go through
+ # the rd UU (packet is routed)
+
+ p1 = (Ether(src=sep1.mac, dst=self.router_mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg3, [p1] * 17, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 114)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # redirect policy has been applied
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].src, routed_src_mac)
+ self.assertEqual(inner[Ether].dst, routed_dst_mac)
+ self.assertEqual(inner[IP].src, ep1.ip4)
+ self.assertEqual(inner[IP].dst, ep2.ip4)
+
+ self.logger.info(self.vapi.cli("show bridge 3 detail"))
+ sep1.remove_vpp_config()
+
+ self.logger.info(self.vapi.cli("show bridge 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge 2 detail"))
+
+ # re-add ep2: it is local again :)
+ ep2.add_vpp_config()
+
+ # packet coming back from the remote sep through rd UU
+ p2 = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=114, gpid=441, gpflags=0x09, flags=0x88) /
+ Ether(src=str(self.router_mac), dst=self.router_mac) /
+ IP(src=ep1.ip4, dst=ep2.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, [p2], self.pg1)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IP].src, ep1.ip4)
+ self.assertEqual(rx[IP].dst, ep2.ip4)
+
+ #
+ # bd_uu2.add_vpp_config()
+ #
+
+ #
+ # cleanup
+ #
+ c1.remove_vpp_config()
+ c2.remove_vpp_config()
+ c3.remove_vpp_config()
+ self.pg7.unconfig_ip4()
+
+ def test_gbp_l3_out(self):
+ """ GBP L3 Out """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ self.vapi.cli("set logging class gbp level debug")
+
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ b_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ b_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+ # Pg7 hosts a BD's BUM
+ # Pg1 some other l3 interface
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+ # a multicast vxlan-gbp tunnel for broadcast in the BD
+ #
+ tun_bm = VppVxlanGbpTunnel(self, self.pg7.local_ip4,
+ "239.1.1.1", 88,
+ mcast_itf=self.pg7)
+ tun_bm.add_vpp_config()
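+        # BUM traffic in the BD floods over this tunnel to multicast group
+        # 239.1.1.1 with VNI 88, which the ARP-for-unknown check below
+        # expects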
+
+ #
+        # a GBP external bridge domain for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, tun_bm)
+ gbd1.add_vpp_config()
+
+ #
+ # The Endpoint-groups in which the external endpoints exist
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+ # the BVIs have the subnets applied ...
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi, "10.0.0.128",
+ 24, bind=b_ip4).add_vpp_config()
+ ip6_addr = VppIpInterfaceAddress(self, gbd1.bvi, "2001:10::128",
+ 64, bind=b_ip6).add_vpp_config()
+
+ # ... which are L3-out subnets
+ l3o_1 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=113)
+ l3o_1.add_vpp_config()
+
+ #
+ # an external interface attached to the outside world and the
+ # external BD
+ #
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ vlan_144 = VppDot1QSubint(self, self.pg0, 144)
+ vlan_144.admin_up()
+        # vlan_102 is not popped
+
+ #
+        # a unicast vxlan-gbp tunnel for inter-RD traffic
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # External Endpoints
+ #
+ eep1 = VppGbpEndpoint(self, self.vlan_100,
+ epg_220, None,
+ "10.0.0.1", "11.0.0.1",
+ "2001:10::1", "3001::1",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep1.add_vpp_config()
+ eep2 = VppGbpEndpoint(self, self.vlan_101,
+ epg_220, None,
+ "10.0.0.2", "11.0.0.2",
+ "2001:10::2", "3001::2",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep2.add_vpp_config()
+ eep3 = VppGbpEndpoint(self, self.vlan_102,
+ epg_220, None,
+ "10.0.0.3", "11.0.0.3",
+ "2001:10::3", "3001::3",
+ ep_flags.GBP_API_ENDPOINT_FLAG_EXTERNAL)
+ eep3.add_vpp_config()
+
+ #
+ # A remote external endpoint
+ #
+ rep = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.101", "11.0.0.101",
+ "2001:10::101", "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ rep.add_vpp_config()
+
+ #
+ # EP1 impersonating EP3 is dropped
+ #
+ p = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc="10.0.0.3", pdst="10.0.0.128",
+ hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ self.send_and_assert_no_replies(self.pg0, p)
+
+ #
+        # ARP packets from external EPs are accepted and replied to
+ #
+ p_arp = (Ether(src=eep1.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc=eep1.ip4, pdst="10.0.0.128",
+ hwsrc=eep1.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
+
+ #
+        # ARP packets from hosts in the remote subnet are accepted and replied to
+ #
+ p_arp = (Ether(src=eep3.mac, dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=102) /
+ ARP(op="who-has",
+ psrc=eep3.ip4, pdst="10.0.0.128",
+ hwsrc=eep3.mac, hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0)
+
+ #
+ # packets destined to unknown addresses in the BVI's subnet
+ # are ARP'd for
+ #
+ p4 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.1", dst="10.0.0.88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ p6 = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="2001:10::1", dst="2001:10::88") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p4 * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, "239.1.1.1")
+ self.assertEqual(rx[VXLAN].vni, 88)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # policy was applied to the original IP packet
+ self.assertEqual(rx[VXLAN].gpid, 113)
+ self.assertTrue(rx[VXLAN].gpflags.A)
+ self.assertFalse(rx[VXLAN].gpflags.D)
+
+ inner = rx[VXLAN].payload
+
+ self.assertTrue(inner.haslayer(ARP))
+
+ #
+ # remote to external
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.0.0.101", dst="10.0.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # local EP pings router
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src=eep1.ip4, dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP pings other local EP
+ #
+ p = (Ether(src=eep1.mac, dst=eep2.mac) /
+ Dot1Q(vlan=100) /
+ IP(src=eep1.ip4, dst=eep2.ip4) /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, eep1.mac)
+ self.assertEqual(rx[Ether].dst, eep2.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ #
+        # local EP pings router w/o the vlan tag popped
+ #
+ p = (Ether(src=eep3.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=102) /
+ IP(src=eep3.ip4, dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_102.remote_mac)
+
+ #
+        # An ip4 subnet reachable through the external EP1
+ #
+ ip_220 = VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath(eep1.ip4,
+ eep1.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_220.add_vpp_config()
+
+ l3o_220 = VppGbpSubnet(
+ self, rd1, "10.220.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_220.add_vpp_config()
+
+ #
+ # An ip6 subnet reachable through the external EP1
+ #
+ ip6_220 = VppIpRoute(self, "10:220::", 64,
+ [VppRoutePath(eep1.ip6,
+ eep1.epg.bvi.sw_if_index)],
+ table_id=t6.table_id)
+ ip6_220.add_vpp_config()
+
+ l3o6_220 = VppGbpSubnet(
+ self, rd1, "10:220::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o6_220.add_vpp_config()
+
+ #
+ # A subnet reachable through the external EP2
+ #
+ ip_221 = VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath(eep2.ip4,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_221.add_vpp_config()
+
+ l3o_221 = VppGbpSubnet(
+ self, rd1, "10.221.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221)
+ l3o_221.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ ICMP(type='echo-request'))
+
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # contract for the external nets to communicate
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ #
+        # A contract with the wrong scope (44 rather than 55) is not matched
+ #
+ c_44 = VppGbpContract(
+ self, 44, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c_44.add_vpp_config()
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ c1 = VppGbpContract(
+ self, 55, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ #
+        # Contracts allowing ext-net 220 to talk with the external EPs
+ #
+ c2 = VppGbpContract(
+ self, 55, 4220, 113, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+ c3 = VppGbpContract(
+ self, 55, 113, 4220, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep2.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ # we did not learn these external hosts
+ self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
+ self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
+
+ #
+ # from remote external EP to local external EP
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.0.0.101", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # ping from an external host to the remote external EP
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst=rep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 444)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+            # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, rep.ip4)
+
+ #
+ # An external subnet reachable via the remote external EP
+ #
+
+ #
+ # first the VXLAN-GBP tunnel over which it is reached
+ #
+ vx_tun_r1 = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 445,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r1.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r1, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ #
+ # then the special adj to resolve through on that tunnel
+ #
+ n1 = VppNeighbor(self,
+ vx_tun_r1.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n1.add_vpp_config()
+
+ #
+ # the route via the adj above
+ #
+ ip_222 = VppIpRoute(self, "10.222.0.0", 24,
+ [VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r1.sw_if_index)],
+ table_id=t4.table_id)
+ ip_222.add_vpp_config()
+
+ l3o_222 = VppGbpSubnet(
+ self, rd1, "10.222.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222.add_vpp_config()
+
+ #
+ # ping between hosts in local and remote external subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+        # Add a contract for the ext-nets 220 -> 222
+ #
+ c4 = VppGbpContract(
+ self, 55, 4220, 4222, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ #
+ # ping from host in local to remote external subnets
+ #
+ p = (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 445)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+            # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, "10.222.0.1")
+
+ #
+ # make the external subnet ECMP
+ #
+ vx_tun_r2 = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 446,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r2.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r2, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ n2 = VppNeighbor(self,
+ vx_tun_r2.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n2.add_vpp_config()
+
+ ip_222.modify([VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r1.sw_if_index),
+ VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r2.sw_if_index)])
+
+ #
+ # now expect load-balance
+ #
+ p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1222, dport=1235) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ self.assertEqual(rxs[0][VXLAN].vni, 445)
+ self.assertEqual(rxs[1][VXLAN].vni, 446)
+
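+        #
+        # Aside - an illustrative sketch, not VPP's actual algorithm: the
+        # ECMP path is typically picked by hashing the flow's 5-tuple, so
+        # packets that differ only in their L4 ports can map to different
+        # tunnels. toy_flow_bucket is a hypothetical stand-in showing why
+        # the two packets above are split across the vni 445 and 446
+        # tunnels.
+        #
+        def toy_flow_bucket(sport, dport, n_paths=2):
+            # symmetric, so both directions of a flow pick the same path
+            return (sport ^ dport) % n_paths
+
+        self.assertNotEqual(toy_flow_bucket(1234, 1234),
+                            toy_flow_bucket(1222, 1235))
+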
+ #
+ # Same LB test for v6
+ #
+ n3 = VppNeighbor(self,
+ vx_tun_r1.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip6)
+ n3.add_vpp_config()
+ n4 = VppNeighbor(self,
+ vx_tun_r2.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip6)
+ n4.add_vpp_config()
+
+ ip_222_6 = VppIpRoute(self, "10:222::", 64,
+ [VppRoutePath(self.pg7.remote_ip6,
+ vx_tun_r1.sw_if_index),
+ VppRoutePath(self.pg7.remote_ip6,
+ vx_tun_r2.sw_if_index)],
+ table_id=t6.table_id)
+ ip_222_6.add_vpp_config()
+
+ l3o_222_6 = VppGbpSubnet(
+ self, rd1, "10:222::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222_6.add_vpp_config()
+
+ p = [(Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::1", dst="10:222::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=eep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IPv6(src="10:220::1", dst="10:222::1") /
+ UDP(sport=7777, dport=8881) /
+ Raw(b'\xa5' * 100))]
+
+ self.logger.info(self.vapi.cli("sh ip6 fib 10:222::1"))
+ rxs = self.send_and_expect(self.pg0, p, self.pg7)
+
+ self.assertEqual(rxs[0][VXLAN].vni, 445)
+ self.assertEqual(rxs[1][VXLAN].vni, 446)
+
+ #
+ # ping from host in remote to local external subnets
+ # there's no contract for this, but the A bit is set.
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
+ self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
+
+ #
+        # ping from a host in a remote external subnet to another remote
+        # external subnet - dropped by the reflection check: traffic from
+        # the fabric is not forwarded back out to the fabric
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.222.0.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IPv6(src="10:222::1", dst="10:222::2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ #
+ # local EP
+ #
+ lep1 = VppGbpEndpoint(self, vlan_144,
+ epg_220, None,
+ "10.0.0.44", "11.0.0.44",
+ "2001:10::44", "3001::44")
+ lep1.add_vpp_config()
+
+ #
+ # local EP to local ip4 external subnet
+ #
+ p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP to local ip6 external subnet
+ #
+ p = (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:220::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, eep1.mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # ip4 and ip6 subnets that load-balance
+ #
+ ip_20 = VppIpRoute(self, "10.20.0.0", 24,
+ [VppRoutePath(eep1.ip4,
+ eep1.epg.bvi.sw_if_index),
+ VppRoutePath(eep2.ip4,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_20.add_vpp_config()
+
+ l3o_20 = VppGbpSubnet(
+ self, rd1, "10.20.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_20.add_vpp_config()
+
+ ip6_20 = VppIpRoute(self, "10:20::", 64,
+ [VppRoutePath(eep1.ip6,
+ eep1.epg.bvi.sw_if_index),
+ VppRoutePath(eep2.ip6,
+ eep2.epg.bvi.sw_if_index)],
+ table_id=t6.table_id)
+ ip6_20.add_vpp_config()
+
+ l3o6_20 = VppGbpSubnet(
+ self, rd1, "10:20::", 64,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o6_20.add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh ip fib 10.20.0.1"))
+ self.logger.info(self.vapi.cli("sh ip6 fib 10:20::1"))
+
+        # two ip6 packets whose ports are chosen so they load-balance
+ p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:20::1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IPv6(src=lep1.ip6, dst="10:20::1") /
+ UDP(sport=124, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
+
+ self.assertEqual(rxs[0][Dot1Q].vlan, 101)
+ self.assertEqual(rxs[1][Dot1Q].vlan, 100)
+
+        # two ip4 packets whose ports are chosen so they load-balance
+ p = [(Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.20.0.1") /
+ UDP(sport=1235, dport=1235) /
+ Raw(b'\xa5' * 100)),
+ (Ether(src=lep1.mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=144) /
+ IP(src=lep1.ip4, dst="10.20.0.1") /
+ UDP(sport=124, dport=1230) /
+ Raw(b'\xa5' * 100))]
+
+ rxs = self.send_and_expect(self.pg0, p, self.pg0, 2)
+
+ self.assertEqual(rxs[0][Dot1Q].vlan, 101)
+ self.assertEqual(rxs[1][Dot1Q].vlan, 100)
+
+ #
+ # cleanup
+ #
+ ip_222.remove_vpp_config()
+ self.pg7.unconfig_ip4()
+ self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
+
+ def test_gbp_anon_l3_out(self):
+ """ GBP Anonymous L3 Out """
+
+ ep_flags = VppEnum.vl_api_gbp_endpoint_flags_t
+ self.vapi.cli("set logging class gbp level debug")
+
+ routed_dst_mac = "00:0c:0c:0c:0c:0c"
+ routed_src_mac = "00:22:bd:f8:19:ff"
+
+ #
+ # IP tables
+ #
+ t4 = VppIpTable(self, 1)
+ t4.add_vpp_config()
+ t6 = VppIpTable(self, 1, True)
+ t6.add_vpp_config()
+
+ rd1 = VppGbpRouteDomain(self, 2, 55, t4, t6)
+ rd1.add_vpp_config()
+
+ self.loop0.set_mac(self.router_mac)
+
+ #
+ # Bind the BVI to the RD
+ #
+ bind_l0_ip4 = VppIpInterfaceBind(self, self.loop0, t4).add_vpp_config()
+ bind_l0_ip6 = VppIpInterfaceBind(self, self.loop0, t6).add_vpp_config()
+
+ #
+        # Pg7 hosts the BD's BUM traffic
+ #
+ self.pg7.config_ip4()
+ self.pg7.resolve_arp()
+
+ #
+        # a GBP external bridge domain for the EPs
+ #
+ bd1 = VppBridgeDomain(self, 1)
+ bd1.add_vpp_config()
+ gbd1 = VppGbpBridgeDomain(self, bd1, rd1, self.loop0, None, None)
+ gbd1.add_vpp_config()
+
+ #
+ # The Endpoint-groups in which the external endpoints exist
+ #
+ epg_220 = VppGbpEndpointGroup(self, 220, 113, rd1, gbd1,
+ None, gbd1.bvi,
+ "10.0.0.128",
+ "2001:10::128",
+ VppGbpEndpointRetention(4))
+ epg_220.add_vpp_config()
+
+        # the BVI has the subnet applied ...
+ ip4_addr = VppIpInterfaceAddress(self, gbd1.bvi,
+ "10.0.0.128", 24,
+ bind=bind_l0_ip4).add_vpp_config()
+
+ # ... which is an Anonymous L3-out subnets
+ l3o_1 = VppGbpSubnet(
+ self, rd1, "10.0.0.0", 24,
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_ANON_L3_OUT,
+ sclass=113)
+ l3o_1.add_vpp_config()
+
+ #
+ # an external interface attached to the outside world and the
+ # external BD
+ #
+ VppL2Vtr(self, self.vlan_100, L2_VTR_OP.L2_POP_1).add_vpp_config()
+ VppL2Vtr(self, self.vlan_101, L2_VTR_OP.L2_POP_1).add_vpp_config()
+
+ #
+ # vlan_100 and vlan_101 are anonymous l3-out interfaces
+ #
+ ext_itf = VppGbpExtItf(self, self.vlan_100, bd1, rd1, anon=True)
+ ext_itf.add_vpp_config()
+ ext_itf = VppGbpExtItf(self, self.vlan_101, bd1, rd1, anon=True)
+ ext_itf.add_vpp_config()
+
+ #
+        # a unicast vxlan-gbp tunnel for inter-RD traffic
+ #
+ vx_tun_l3 = VppGbpVxlanTunnel(
+ self, 444, rd1.rd_id,
+ VppEnum.vl_api_gbp_vxlan_tunnel_mode_t.GBP_VXLAN_TUNNEL_MODE_L3,
+ self.pg2.local_ip4)
+ vx_tun_l3.add_vpp_config()
+
+ #
+ # A remote external endpoint
+ #
+ rep = VppGbpEndpoint(self, vx_tun_l3,
+ epg_220, None,
+ "10.0.0.201", "11.0.0.201",
+ "2001:10::201", "3001::101",
+ ep_flags.GBP_API_ENDPOINT_FLAG_REMOTE,
+ self.pg7.local_ip4,
+ self.pg7.remote_ip4,
+ mac=None)
+ rep.add_vpp_config()
+
+ #
+        # ARP packets from hosts in the external subnet are accepted,
+        # flooded and replied to. We expect 2 packets:
+        #   - the ARP request flooded over the other vlan subif
+        #   - the ARP reply from the BVI
+ #
+ p_arp = (Ether(src=self.vlan_100.remote_mac,
+ dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=100) /
+ ARP(op="who-has",
+ psrc="10.0.0.100",
+ pdst="10.0.0.128",
+ hwsrc=self.vlan_100.remote_mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
+
+ p_arp = (Ether(src=self.vlan_101.remote_mac,
+ dst="ff:ff:ff:ff:ff:ff") /
+ Dot1Q(vlan=101) /
+ ARP(op="who-has",
+ psrc='10.0.0.101',
+ pdst="10.0.0.128",
+ hwsrc=self.vlan_101.remote_mac,
+ hwdst="ff:ff:ff:ff:ff:ff"))
+ rxs = self.send_and_expect(self.pg0, p_arp * 1, self.pg0, n_rx=2)
+
+ #
+ # remote to external
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=vx_tun_l3.vni, gpid=epg_220.sclass, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src=str(rep.ip4), dst="10.0.0.100") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # local EP pings router
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.100", dst="10.0.0.128") /
+ ICMP(type='echo-request'))
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_100.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 100)
+
+ #
+ # local EP pings other local EP
+ #
+ p = (Ether(src=self.vlan_100.remote_mac,
+ dst=self.vlan_101.remote_mac) /
+ Dot1Q(vlan=100) /
+ IP(src="10.0.0.100", dst="10.0.0.101") /
+ ICMP(type='echo-request'))
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.vlan_100.remote_mac)
+ self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ #
+ # A subnet reachable through an external router on vlan 100
+ #
+ ip_220 = VppIpRoute(self, "10.220.0.0", 24,
+ [VppRoutePath("10.0.0.100",
+ epg_220.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_220.add_vpp_config()
+
+ l3o_220 = VppGbpSubnet(
+ self, rd1, "10.220.0.0", 24,
+            # note: this is a "regular" L3 out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4220)
+ l3o_220.add_vpp_config()
+
+ #
+ # A subnet reachable through an external router on vlan 101
+ #
+ ip_221 = VppIpRoute(self, "10.221.0.0", 24,
+ [VppRoutePath("10.0.0.101",
+ epg_220.bvi.sw_if_index)],
+ table_id=t4.table_id)
+ ip_221.add_vpp_config()
+
+ l3o_221 = VppGbpSubnet(
+ self, rd1, "10.221.0.0", 24,
+            # note: this is a "regular" L3 out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4221)
+ l3o_221.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ ICMP(type='echo-request'))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # contract for the external nets to communicate
+ #
+ rule4 = AclRule(is_permit=1, proto=17)
+ rule6 = AclRule(src_prefix=IPv6Network((0, 0)),
+ dst_prefix=IPv6Network((0, 0)), is_permit=1, proto=17)
+ acl = VppAcl(self, rules=[rule4, rule6])
+ acl.add_vpp_config()
+
+ c1 = VppGbpContract(
+ self, 55, 4220, 4221, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c1.add_vpp_config()
+
+ #
+        # Contracts allowing ext-net 220 to talk with the external EPs
+ #
+ c2 = VppGbpContract(
+ self, 55, 4220, 113, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c2.add_vpp_config()
+ c3 = VppGbpContract(
+ self, 55, 113, 4220, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c3.add_vpp_config()
+
+ #
+ # ping between hosts in remote subnets
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.221.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, str(self.router_mac))
+ self.assertEqual(rx[Ether].dst, self.vlan_101.remote_mac)
+ self.assertEqual(rx[Dot1Q].vlan, 101)
+
+ # we did not learn these external hosts
+ self.assertFalse(find_gbp_endpoint(self, ip="10.220.0.1"))
+ self.assertFalse(find_gbp_endpoint(self, ip="10.221.0.1"))
+
+ #
+ # from remote external EP to local external EP
+ #
+ p = (Ether(src=self.pg7.remote_mac,
+ dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4,
+ dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=444, gpid=113, flags=0x88) /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src=rep.ip4, dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 1, self.pg0)
+
+ #
+ # ping from an external host to the remote external EP
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst=rep.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 1, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ # self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 444)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+            # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, rep.ip4)
+
+ #
+ # An external subnet reachable via the remote external EP
+ #
+
+ #
+ # first the VXLAN-GBP tunnel over which it is reached
+ #
+ vx_tun_r = VppVxlanGbpTunnel(
+ self, self.pg7.local_ip4,
+ self.pg7.remote_ip4, 445,
+ mode=(VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L3))
+ vx_tun_r.add_vpp_config()
+ VppIpInterfaceBind(self, vx_tun_r, t4).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh vxlan-gbp tunnel"))
+
+ #
+ # then the special adj to resolve through on that tunnel
+ #
+ n1 = VppNeighbor(self,
+ vx_tun_r.sw_if_index,
+ "00:0c:0c:0c:0c:0c",
+ self.pg7.remote_ip4)
+ n1.add_vpp_config()
+
+ #
+ # the route via the adj above
+ #
+ ip_222 = VppIpRoute(self, "10.222.0.0", 24,
+ [VppRoutePath(self.pg7.remote_ip4,
+ vx_tun_r.sw_if_index)],
+ table_id=t4.table_id)
+ ip_222.add_vpp_config()
+
+ l3o_222 = VppGbpSubnet(
+ self, rd1, "10.222.0.0", 24,
+            # note: this is a "regular" l3out subnet (not connected)
+ VppEnum.vl_api_gbp_subnet_type_t.GBP_API_SUBNET_L3_OUT,
+ sclass=4222)
+ l3o_222.add_vpp_config()
+
+ #
+ # ping between hosts in local and remote external subnets
+ # dropped without a contract
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+        # Add a contract for the ext-nets 220 -> 222
+ #
+ c4 = VppGbpContract(
+ self, 55, 4220, 4222, acl.acl_index,
+ [VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ []),
+ VppGbpContractRule(
+ VppEnum.vl_api_gbp_rule_action_t.GBP_API_RULE_PERMIT,
+ VppEnum.vl_api_gbp_hash_mode_t.GBP_API_HASH_MODE_SRC_IP,
+ [])],
+ [ETH_P_IP, ETH_P_IPV6])
+ c4.add_vpp_config()
+
+ #
+ # ping from host in local to remote external subnets
+ #
+ p = (Ether(src=self.vlan_100.remote_mac, dst=str(self.router_mac)) /
+ Dot1Q(vlan=100) /
+ IP(src="10.220.0.1", dst="10.222.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg0, p * 3, self.pg7)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg7.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg7.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg7.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg7.remote_ip4)
+ self.assertEqual(rx[VXLAN].vni, 445)
+ self.assertTrue(rx[VXLAN].flags.G)
+ self.assertTrue(rx[VXLAN].flags.Instance)
+ # the sclass of the ext-net the packet came from
+ self.assertEqual(rx[VXLAN].gpid, 4220)
+ # policy was applied to the original IP packet
+ self.assertTrue(rx[VXLAN].gpflags.A)
+            # since it's an external host the receiver should not learn it
+ self.assertTrue(rx[VXLAN].gpflags.D)
+ inner = rx[VXLAN].payload
+ self.assertEqual(inner[Ether].dst, "00:0c:0c:0c:0c:0c")
+ self.assertEqual(inner[IP].src, "10.220.0.1")
+ self.assertEqual(inner[IP].dst, "10.222.0.1")
+
+ #
+ # ping from host in remote to local external subnets
+ # there's no contract for this, but the A bit is set.
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.220.0.1") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_expect(self.pg7, p * 3, self.pg0)
+ self.assertFalse(find_gbp_endpoint(self, ip="10.222.0.1"))
+
+ #
+        # ping from a host in a remote external subnet to another remote
+        # external subnet - dropped by the reflection check: traffic from
+        # the fabric is not forwarded back out to the fabric
+ #
+ p = (Ether(src=self.pg7.remote_mac, dst=self.pg7.local_mac) /
+ IP(src=self.pg7.remote_ip4, dst=self.pg7.local_ip4) /
+ UDP(sport=1234, dport=48879) /
+ VXLAN(vni=445, gpid=4222, flags=0x88, gpflags='A') /
+ Ether(src=self.pg0.remote_mac, dst=str(self.router_mac)) /
+ IP(src="10.222.0.1", dst="10.222.0.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ rxs = self.send_and_assert_no_replies(self.pg7, p * 3)
+
+ #
+ # cleanup
+ #
+ self.vlan_101.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.vlan_100.set_vtr(L2_VTR_OP.L2_DISABLED)
+ self.pg7.unconfig_ip4()
+ # make sure the programmed EP is no longer learnt from DP
+ self.wait_for_ep_timeout(sw_if_index=rep.itf.sw_if_index, ip=rep.ip4)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_geneve.py b/test/test_geneve.py
new file mode 100644
index 00000000000..9ce1f8ff643
--- /dev/null
+++ b/test/test_geneve.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.contrib.geneve import GENEVE
+
+import util
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+
+class TestGeneve(BridgeDomain, VppTestCase):
+ """ GENEVE Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+        Encapsulate the original payload frame by adding a GENEVE header
+        with its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ GENEVE(vni=vni) /
+ pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+        Encapsulate the original payload frame by adding a GENEVE header
+        with its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac) /
+ IP(src=src_ip, dst=self.mcast_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ GENEVE(vni=vni) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+        Decapsulate the original payload frame by removing the GENEVE header
+ """
+        # check whether the I flag is set
+ # self.assertEqual(pkt[GENEVE].flags, int('0x8', 16))
+ return pkt[GENEVE].payload
+
+ # Method for checking GENEVE encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify GENEVE tunnel source IP is VPP_IP and destination IP is MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+        # Verify UDP destination port is GENEVE 6081, source UDP port
+        # could be arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # TODO: checksum check
+ # Verify VNI
+ self.assertEqual(pkt[GENEVE].vni, vni)
+
+ @classmethod
+ def create_geneve_flood_test_bd(cls, vni, n_ucast_tunnels):
+        # Create n_ucast_tunnels unicast GENEVE tunnels in the BD
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
+ # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ r = cls.vapi.geneve_add_del_tunnel(
+ local_address=cls.pg0.local_ip4, remote_address=dest_ip4,
+ vni=vni)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=vni)
+
+ @classmethod
+ def add_del_shared_mcast_dst_load(cls, is_add):
+ """
+ add or del tunnels sharing the same mcast dst
+ to test geneve ref_count mechanism
+ """
+ n_shared_dst_tunnels = 10
+ vni_start = 10000
+ vni_end = vni_start + n_shared_dst_tunnels
+ for vni in range(vni_start, vni_end):
+ r = cls.vapi.geneve_add_del_tunnel(
+ local_address=cls.pg0.local_ip4,
+ remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
+ is_add=is_add, vni=vni)
+ if r.sw_if_index == 0xffffffff:
+ raise ValueError("bad sw_if_index: ~0")
+
+ @classmethod
+ def add_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=1)
+
+ @classmethod
+ def del_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=0)
+
+ @classmethod
+ def add_del_mcast_tunnels_load(cls, is_add):
+ """
+ add or del tunnels to test geneve stability
+ """
+ n_distinct_dst_tunnels = 10
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_distinct_dst_tunnels
+ for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
+ ip_range_end):
+ vni = int(dest_ip4.split('.')[3])
+ cls.vapi.geneve_add_del_tunnel(local_address=cls.pg0.local_ip4,
+ remote_address=dest_ip4,
+ mcast_sw_if_index=1, is_add=is_add,
+ vni=vni)
+
+ @classmethod
+ def add_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=1)
+
+ @classmethod
+ def del_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=0)
+
+ # Class method to start the GENEVE test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestGeneve, cls).setUpClass()
+
+ try:
+ cls.dport = 6081
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Our Multicast address
+ cls.mcast_ip4 = '239.1.1.1'
+ cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip4)
+
+ # Create GENEVE VTEP on VPP pg0, and put geneve_tunnel0 and pg1
+ # into BD.
+ cls.single_tunnel_vni = 0xabcde
+ cls.single_tunnel_bd = 1
+ r = cls.vapi.geneve_add_del_tunnel(
+ local_address=cls.pg0.local_ip4,
+ remote_address=cls.pg0.remote_ip4, vni=cls.single_tunnel_vni)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ cls.n_ucast_tunnels = 10
+ cls.mcast_flood_bd = 2
+ cls.create_geneve_flood_test_bd(cls.mcast_flood_bd,
+ cls.n_ucast_tunnels)
+ r = cls.vapi.geneve_add_del_tunnel(
+ local_address=cls.pg0.local_ip4,
+ remote_address=cls.mcast_ip4, mcast_sw_if_index=1,
+ vni=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
+
+ # Add and delete mcast tunnels to check stability
+ cls.add_shared_mcast_dst_load()
+ cls.add_mcast_tunnels_load()
+ cls.del_shared_mcast_dst_load()
+ cls.del_mcast_tunnels_load()
+
+ # Setup vni 3 to test unicast flooding
+ cls.ucast_flood_bd = 3
+ cls.create_geneve_flood_test_bd(cls.ucast_flood_bd,
+ cls.n_ucast_tunnels)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestGeneve, cls).tearDownClass()
+ raise
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+ def tearDown(self):
+ super(TestGeneve, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show geneve tunnel"))
+
+
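+def geneve_header_sketch(vni=0xabcde):
+    """
+    Illustrative sketch only - a hypothetical helper that is not used by
+    the tests above. It shows the outer headers those tests build and
+    verify: GENEVE runs over UDP with the well-known destination port
+    6081 and carries a 24-bit VNI.
+    """
+    frame = (Ether(src="02:fe:00:00:00:01", dst="02:fe:00:00:00:02") /
+             IP(src="1.1.1.1", dst="1.1.1.2") /
+             UDP(sport=12345, dport=6081) /
+             GENEVE(vni=vni) /
+             Ether(src="02:fe:00:00:00:03", dst="02:fe:00:00:00:04") /
+             IP() / ICMP())
+    # a couple of the fields check_encapsulation() above asserts on
+    assert frame[UDP].dport == 6081
+    assert frame[GENEVE].vni == vni
+    return frame
+
+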
+class TestGeneveL3(VppTestCase):
+ """ GENEVE L3 Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestGeneveL3, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ except Exception:
+ super(TestGeneveL3, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestGeneveL3, cls).tearDownClass()
+
+ def tearDown(self):
+ super(TestGeneveL3, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show geneve tunnel"))
+ self.logger.info(self.vapi.cli("show ip neighbor"))
+
+ def test_l3_packet(self):
+ vni = 1234
+ r = self.vapi.add_node_next(node_name="geneve4-input",
+ next_name="ethernet-input")
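+        # ethernet-input is now a valid next node for geneve4-input;
+        # the returned index is used below as the tunnel's decap-next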
+ r = self.vapi.geneve_add_del_tunnel2(
+ is_add=1,
+ local_address=self.pg0.local_ip4,
+ remote_address=self.pg0.remote_ip4,
+ vni=vni,
+ l3_mode=1,
+ decap_next_index=r.next_index)
+
+ self.vapi.sw_interface_add_del_address(
+ sw_if_index=r.sw_if_index, prefix="10.0.0.1/24")
+
+ pkt = (Ether(src=self.pg0.remote_mac, dst="d0:0b:ee:d0:00:00") /
+ IP(src='10.0.0.2', dst='10.0.0.1') /
+ ICMP())
+
+ encap = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=6081, dport=6081, chksum=0) /
+ GENEVE(vni=vni))
+
+ arp = (Ether(src=self.pg0.remote_mac, dst="d0:0b:ee:d0:00:00") /
+ ARP(op="is-at", hwsrc=self.pg0.remote_mac,
+ hwdst="d0:0b:ee:d0:00:00", psrc="10.0.0.2",
+ pdst="10.0.0.1"))
+
+ rx = self.send_and_expect(self.pg0, encap/pkt*1, self.pg0)
+ rx = self.send_and_assert_no_replies(self.pg0, encap/arp*1, self.pg0)
+ rx = self.send_and_expect(self.pg0, encap/pkt*1, self.pg0)
+ self.assertEqual(rx[0][ICMP].type, 0) # echo reply
+
+ r = self.vapi.geneve_add_del_tunnel2(
+ is_add=0,
+ local_address=self.pg0.local_ip4,
+ remote_address=self.pg0.remote_ip4,
+ vni=vni)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_gre.py b/test/test_gre.py
new file mode 100644
index 00000000000..ba20ba8dec0
--- /dev/null
+++ b/test/test_gre.py
@@ -0,0 +1,1296 @@
+#!/usr/bin/env python3
+
+import unittest
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q, GRE
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.volatile import RandMAC, RandIP
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+from vpp_sub_interface import L2_VTR_OP, VppDot1QSubint
+from vpp_gre_interface import VppGreInterface
+from vpp_teib import VppTeib
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable, FibPathProto, \
+ VppMplsLabel
+from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface
+from util import ppp, ppc
+from vpp_papi import VppEnum
+
+
+@tag_fixme_vpp_workers
+class TestGREInputNodes(VppTestCase):
+ """ GRE Input Nodes Test Case """
+
+ def setUp(self):
+ super(TestGREInputNodes, self).setUp()
+
+        # create 1 pg interface
+ self.create_pg_interfaces(range(1))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.admin_down()
+ super(TestGREInputNodes, self).tearDown()
+
+ def test_gre_input_node(self):
+        """ GRE input nodes not registered unless configured """
+ pkt = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ GRE())
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+ # no tunnel created, gre-input not registered
+ err = self.statistics.get_counter(
+ '/err/ip4-local/unknown ip protocol')[0]
+ self.assertEqual(err, 1)
+ err_count = err
+
+ # create gre tunnel
+ gre_if = VppGreInterface(self, self.pg0.local_ip4, "1.1.1.2")
+ gre_if.add_vpp_config()
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+ # tunnel created, gre-input registered
+ err = self.statistics.get_counter(
+ '/err/ip4-local/unknown ip protocol')[0]
+ # expect no new errors
+ self.assertEqual(err, err_count)
+
+
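+def gre_4o4_frame_sketch():
+    """
+    Illustrative sketch only - a hypothetical helper that is not used by
+    the tests below. It shows the GRE IPv4-over-IPv4 frames those tests
+    build and verify: the outer IP header carries protocol 47 (GRE) and
+    the GRE protocol type is 0x0800 for the inner IPv4 payload.
+    """
+    frame = (Ether(src="02:fe:00:00:00:01", dst="02:fe:00:00:00:02") /
+             IP(src="1.1.1.1", dst="1.1.1.2") /
+             GRE() /
+             IP(src="5.5.5.5", dst="4.4.4.4") /
+             UDP(sport=1234, dport=1234) /
+             Raw(b'\xa5' * 100))
+    # build and re-parse so scapy fills in the defaulted fields
+    parsed = Ether(scapy.compat.raw(frame))
+    assert parsed[IP].proto == 47
+    assert parsed[GRE].proto == 0x0800
+    return parsed
+
+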
+class TestGRE(VppTestCase):
+ """ GRE Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestGRE, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestGRE, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestGRE, self).setUp()
+
+        # create 5 pg interfaces - set one in a non-default table.
+ self.create_pg_interfaces(range(5))
+
+ self.tbl = VppIpTable(self, 1)
+ self.tbl.add_vpp_config()
+ self.pg1.set_table_ip4(1)
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg1.config_ip4()
+ self.pg1.resolve_arp()
+ self.pg2.config_ip6()
+ self.pg2.resolve_ndp()
+ self.pg3.config_ip4()
+ self.pg3.resolve_arp()
+ self.pg4.config_ip4()
+ self.pg4.resolve_arp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ self.pg1.set_table_ip4(0)
+ super(TestGRE, self).tearDown()
+
+ def create_stream_ip4(self, src_if, src_ip, dst_ip, dscp=0, ecn=0):
+ pkts = []
+ tos = (dscp << 2) | ecn
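+        # IPv4 ToS byte: DSCP in the upper six bits, ECN in the lower two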
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_ip, dst=dst_ip, tos=tos) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_stream_ip6(self, src_if, src_ip, dst_ip, dscp=0, ecn=0):
+ pkts = []
+ tc = (dscp << 2) | ecn
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IPv6(src=src_ip, dst=dst_ip, tc=tc) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_4o4(self, src_if,
+ tunnel_src, tunnel_dst,
+ src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ IP(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_6o4(self, src_if,
+ tunnel_src, tunnel_dst,
+ src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ IPv6(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_6o6(self, src_if,
+ tunnel_src, tunnel_dst,
+ src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IPv6(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ IPv6(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_l2o4(self, src_if,
+ tunnel_src, tunnel_dst):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ Ether(dst=RandMAC('*:*:*:*:*:*'),
+ src=RandMAC('*:*:*:*:*:*')) /
+ IP(src=scapy.compat.raw(RandIP()),
+ dst=scapy.compat.raw(RandIP())) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_vlano4(self, src_if,
+ tunnel_src, tunnel_dst, vlan):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if, src_if)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ Ether(dst=RandMAC('*:*:*:*:*:*'),
+ src=RandMAC('*:*:*:*:*:*')) /
+ Dot1Q(vlan=vlan) /
+ IP(src=scapy.compat.raw(RandIP()),
+ dst=scapy.compat.raw(RandIP())) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def verify_tunneled_4o4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst,
+ dscp=0, ecn=0):
+
+ self.assertEqual(len(capture), len(sent))
+ tos = (dscp << 2) | ecn
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+ self.assertEqual(rx_ip.tos, tos)
+ self.assertEqual(rx_ip.len, len(rx_ip))
+
+ rx_gre = rx[GRE]
+ rx_ip = rx_gre[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_6o6(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst,
+ dscp=0, ecn=0):
+
+ self.assertEqual(len(capture), len(sent))
+ tc = (dscp << 2) | ecn
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+ self.assertEqual(rx_ip.tc, tc)
+
+ rx_gre = GRE(scapy.compat.raw(rx_ip[IPv6].payload))
+
+ self.assertEqual(rx_ip.plen, len(rx_gre))
+
+ rx_ip = rx_gre[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_4o6(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst):
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ rx_ip = rx[IPv6]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+
+ rx_gre = GRE(scapy.compat.raw(rx_ip[IPv6].payload))
+
+ self.assertEqual(rx_ip.plen, len(rx_gre))
+
+ tx_ip = tx[IP]
+ rx_ip = rx_gre[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_6o4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst):
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+ self.assertEqual(rx_ip.len, len(rx_ip))
+
+ rx_gre = GRE(scapy.compat.raw(rx_ip[IP].payload))
+ rx_ip = rx_gre[IPv6]
+ tx_ip = tx[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_l2o4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+ self.assertEqual(rx_ip.len, len(rx_ip))
+
+ rx_gre = rx[GRE]
+ rx_l2 = rx_gre[Ether]
+ rx_ip = rx_l2[IP]
+ tx_gre = tx[GRE]
+ tx_l2 = tx_gre[Ether]
+ tx_ip = tx_l2[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # bridged, not L3 forwarded, so no TTL decrement
+ self.assertEqual(rx_ip.ttl, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_vlano4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst, vlan):
+ try:
+ self.assertEqual(len(capture), len(sent))
+ except:
+ ppc("Unexpected packets captured:", capture)
+ raise
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+
+ rx_gre = rx[GRE]
+ rx_l2 = rx_gre[Ether]
+ rx_vlan = rx_l2[Dot1Q]
+ rx_ip = rx_l2[IP]
+
+ self.assertEqual(rx_vlan.vlan, vlan)
+
+ tx_gre = tx[GRE]
+ tx_l2 = tx_gre[Ether]
+ tx_ip = tx_l2[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # bridged, not L3 forwarded, so no TTL decrement
+ self.assertEqual(rx_ip.ttl, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_decapped_4o4(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+ tx_gre = tx[GRE]
+ tx_ip = tx_gre[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_decapped_6o4(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IPv6]
+ tx_gre = tx[GRE]
+ tx_ip = tx_gre[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_decapped_6o6(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+ tx_gre = tx[GRE]
+ tx_ip = tx_gre[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def test_gre(self):
+ """ GRE IPv4 tunnel Tests """
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+        # - assign an IP Address
+ # - Add a route via the tunnel
+ #
+ gre_if = VppGreInterface(self,
+ self.pg0.local_ip4,
+ "1.1.1.2")
+ gre_if.add_vpp_config()
+
+ #
+ # The double create (create the same tunnel twice) should fail,
+ # and we should still be able to use the original
+ #
+ try:
+ gre_if.add_vpp_config()
+ except Exception:
+ pass
+ else:
+ self.fail("Double GRE tunnel add does not fail")
+
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ route_via_tun = VppIpRoute(self, "4.4.4.4", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+
+ route_via_tun.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+        # - they are all dropped since the tunnel's destination IP
+        #   is unresolved - or resolves via the default route - which
+        #   is a drop.
+ #
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
+
+ self.send_and_assert_no_replies(self.pg0, tx)
+
+ #
+ # Add a route that resolves the tunnel's destination
+ #
+ route_tun_dst = VppIpRoute(self, "1.1.1.2", 32,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+ route_tun_dst.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - packets are GRE encapped
+ #
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_4o4(self.pg0, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # Send tunneled packets that match the created tunnel and
+ # are decapped and forwarded
+ #
+ tx = self.create_tunnel_stream_4o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_decapped_4o4(self.pg0, rx, tx)
+
+ #
+ # Send tunneled packets that do not match the tunnel's src
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg0,
+ "1.1.1.3",
+ self.pg0.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.send_and_assert_no_replies(
+ self.pg0, tx,
+ remark="GRE packets forwarded despite no SRC address match")
+
+ #
+ # Configure IPv6 on the PG interface so we can route IPv6
+ # packets
+ #
+ self.pg0.config_ip6()
+ self.pg0.resolve_ndp()
+
+ #
+        # Send IPv6 tunnel encapsulated packets
+ # - dropped since IPv6 is not enabled on the tunnel
+ #
+ tx = self.create_tunnel_stream_6o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+ self.send_and_assert_no_replies(self.pg0, tx,
+ "IPv6 GRE packets forwarded "
+ "despite IPv6 not enabled on tunnel")
+
+ #
+ # Enable IPv6 on the tunnel
+ #
+ gre_if.config_ip6()
+
+ #
+        # Send IPv6 tunnel encapsulated packets
+ # - forwarded since IPv6 is enabled on the tunnel
+ #
+ tx = self.create_tunnel_stream_6o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_decapped_6o4(self.pg0, rx, tx)
+
+ #
+ # Send v6 packets for v4 encap
+ #
+ route6_via_tun = VppIpRoute(
+ self, "2001::1", 128,
+ [VppRoutePath("::",
+ gre_if.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route6_via_tun.add_vpp_config()
+
+ tx = self.create_stream_ip6(self.pg0, "2001::2", "2001::1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+
+ self.verify_tunneled_6o4(self.pg0, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # add a labelled route through the tunnel
+ #
+ label_via_tun = VppIpRoute(self, "5.4.3.2", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index,
+ labels=[VppMplsLabel(33)])])
+ label_via_tun.add_vpp_config()
+
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "5.4.3.2")
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_4o4(self.pg0, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # an MPLS tunnel over the GRE tunnel add a route through
+ # the mpls tunnel
+ #
+ mpls_tun = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index,
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(46)])])
+ mpls_tun.add_vpp_config()
+ mpls_tun.admin_up()
+
+ label_via_mpls = VppIpRoute(self, "5.4.3.1", 32,
+ [VppRoutePath("0.0.0.0",
+ mpls_tun.sw_if_index,
+ labels=[VppMplsLabel(33)])])
+ label_via_mpls.add_vpp_config()
+
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "5.4.3.1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_4o4(self.pg0, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ mpls_tun_l2 = VppMPLSTunnelInterface(
+ self,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index,
+ labels=[VppMplsLabel(44),
+ VppMplsLabel(46)])],
+ is_l2=1)
+ mpls_tun_l2.add_vpp_config()
+ mpls_tun_l2.admin_up()
+
+ #
+ # test case cleanup
+ #
+ route_tun_dst.remove_vpp_config()
+ route_via_tun.remove_vpp_config()
+ route6_via_tun.remove_vpp_config()
+ label_via_mpls.remove_vpp_config()
+ label_via_tun.remove_vpp_config()
+ mpls_tun.remove_vpp_config()
+ mpls_tun_l2.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ self.pg0.unconfig_ip6()
+
+ def test_gre6(self):
+ """ GRE IPv6 tunnel Tests """
+
+ self.pg1.config_ip6()
+ self.pg1.resolve_ndp()
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+ # - assign an IP Address
+ # - Add a route via the tunnel
+ #
+ gre_if = VppGreInterface(self,
+ self.pg2.local_ip6,
+ "1002::1")
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip6()
+
+ route_via_tun = VppIpRoute(self, "4004::1", 128,
+ [VppRoutePath("0::0",
+ gre_if.sw_if_index)])
+
+ route_via_tun.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - they are all dropped since the tunnel's destination IP
+ # is unresolved - or resolves via the default route - which
+ # is a drop.
+ #
+ tx = self.create_stream_ip6(self.pg2, "5005::1", "4004::1")
+ self.send_and_assert_no_replies(
+ self.pg2, tx,
+ "GRE packets forwarded without DIP resolved")
+
+ #
+ # Add a route that resolves the tunnel's destination
+ #
+ route_tun_dst = VppIpRoute(self, "1002::1", 128,
+ [VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index)])
+ route_tun_dst.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - packets are GRE encapped
+ #
+ tx = self.create_stream_ip6(self.pg2, "5005::1", "4004::1")
+ rx = self.send_and_expect(self.pg2, tx, self.pg2)
+ self.verify_tunneled_6o6(self.pg2, rx, tx,
+ self.pg2.local_ip6, "1002::1")
+
+ #
+ # Test decap. decapped packets go out pg1
+ #
+ tx = self.create_tunnel_stream_6o6(self.pg2,
+ "1002::1",
+ self.pg2.local_ip6,
+ "2001::1",
+ self.pg1.remote_ip6)
+ rx = self.send_and_expect(self.pg2, tx, self.pg1)
+
+ #
+ # RX'd packet is UDP over IPv6, test the GRE header is gone.
+ #
+ self.assertFalse(rx[0].haslayer(GRE))
+ self.assertEqual(rx[0][IPv6].dst, self.pg1.remote_ip6)
+
+ #
+ # Send v4 over v6
+ #
+ route4_via_tun = VppIpRoute(self, "1.1.1.1", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route4_via_tun.add_vpp_config()
+
+ tx = self.create_stream_ip4(self.pg0, "1.1.1.2", "1.1.1.1")
+ rx = self.send_and_expect(self.pg0, tx, self.pg2)
+
+ self.verify_tunneled_4o6(self.pg0, rx, tx,
+ self.pg2.local_ip6, "1002::1")
+
+ #
+ # test case cleanup
+ #
+ route_tun_dst.remove_vpp_config()
+ route_via_tun.remove_vpp_config()
+ route4_via_tun.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ self.pg2.unconfig_ip6()
+ self.pg1.unconfig_ip6()
+
+ def test_gre_vrf(self):
+ """ GRE tunnel VRF Tests """
+
+ e = VppEnum.vl_api_tunnel_encap_decap_flags_t
+
+ #
+ # Create an L3 GRE tunnel whose destination is in the non-default
+ # table. The underlay is thus non-default - the overlay is still
+ # the default.
+ # - set it admin up
+ # - assign an IP Address
+ #
+ gre_if = VppGreInterface(
+ self, self.pg1.local_ip4,
+ "2.2.2.2",
+ outer_table_id=1,
+ flags=(e.TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP |
+ e.TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
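+ # the encap flags request that DSCP and ECN be copied from the
+ # overlay (inner) header into the GRE outer header; this is
+ # verified below with dscp=5, ecn=3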
+
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ #
+ # Add a route via the tunnel - in the overlay
+ #
+ route_via_tun = VppIpRoute(self, "9.9.9.9", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route_via_tun.add_vpp_config()
+
+ #
+ # Add a route that resolves the tunnel's destination - in the
+ # underlay table
+ #
+ route_tun_dst = VppIpRoute(self, "2.2.2.2", 32, table_id=1,
+ paths=[VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_tun_dst.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # packets are sent in on pg0 which is in the default table
+ # - packets are GRE encapped
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "9.9.9.9",
+ dscp=5, ecn=3)
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+ self.verify_tunneled_4o4(self.pg1, rx, tx,
+ self.pg1.local_ip4, "2.2.2.2",
+ dscp=5, ecn=3)
+
+ #
+ # Send tunneled packets that match the created tunnel and
+ # are decapped and forwarded. This tests the decap lookup
+ # does not happen in the encap table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg1,
+ "2.2.2.2",
+ self.pg1.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ rx = self.send_and_expect(self.pg1, tx, self.pg0)
+ self.verify_decapped_4o4(self.pg0, rx, tx)
+
+ #
+ # Send tunneled packets that match the created tunnel
+ # but arrive on an interface that is not in the tunnel's
+ # encap VRF, these are dropped.
+ # IP enable the interface so they aren't dropped due to
+ # IP not being enabled.
+ #
+ self.pg2.config_ip4()
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg2,
+ "2.2.2.2",
+ self.pg1.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ rx = self.send_and_assert_no_replies(
+ self.pg2, tx,
+ "GRE decap packets in wrong VRF")
+
+ self.pg2.unconfig_ip4()
+
+ #
+ # test case cleanup
+ #
+ route_tun_dst.remove_vpp_config()
+ route_via_tun.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ def test_gre_l2(self):
+ """ GRE tunnel L2 Tests """
+
+ #
+ # Add routes to resolve the tunnel destinations
+ #
+ route_tun1_dst = VppIpRoute(self, "2.2.2.2", 32,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+ route_tun2_dst = VppIpRoute(self, "2.2.2.3", 32,
+ [VppRoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+
+ route_tun1_dst.add_vpp_config()
+ route_tun2_dst.add_vpp_config()
+
+ #
+ # Create 2 L2 GRE tunnels and x-connect them
+ #
+ gre_if1 = VppGreInterface(self, self.pg0.local_ip4,
+ "2.2.2.2",
+ type=(VppEnum.vl_api_gre_tunnel_type_t.
+ GRE_API_TUNNEL_TYPE_TEB))
+ gre_if2 = VppGreInterface(self, self.pg0.local_ip4,
+ "2.2.2.3",
+ type=(VppEnum.vl_api_gre_tunnel_type_t.
+ GRE_API_TUNNEL_TYPE_TEB))
+ gre_if1.add_vpp_config()
+ gre_if2.add_vpp_config()
+
+ gre_if1.admin_up()
+ gre_if2.admin_up()
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
+ gre_if2.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
+ gre_if1.sw_if_index,
+ enable=1)
+
+ #
+ # Send in tunnel encapped L2. expect out tunnel encapped L2
+ # in both directions
+ #
+ tx = self.create_tunnel_stream_l2o4(self.pg0,
+ "2.2.2.2",
+ self.pg0.local_ip4)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_l2o4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.3")
+
+ tx = self.create_tunnel_stream_l2o4(self.pg0,
+ "2.2.2.3",
+ self.pg0.local_ip4)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_l2o4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.2")
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
+ gre_if2.sw_if_index,
+ enable=0)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
+ gre_if1.sw_if_index,
+ enable=0)
+
+ #
+ # Create VLAN sub-interfaces on the GRE TEB interfaces,
+ # then x-connect them
+ #
+ gre_if_11 = VppDot1QSubint(self, gre_if1, 11)
+ gre_if_12 = VppDot1QSubint(self, gre_if2, 12)
+
+ # gre_if_11.add_vpp_config()
+ # gre_if_12.add_vpp_config()
+
+ gre_if_11.admin_up()
+ gre_if_12.admin_up()
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if_11.sw_if_index,
+ gre_if_12.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if_12.sw_if_index,
+ gre_if_11.sw_if_index,
+ enable=1)
+
+ #
+ # Configure both to pop their respective VLAN tags,
+ # so that the tags are pushed back on during the x-connect
+ #
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=gre_if_12.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=12)
+ self.vapi.l2_interface_vlan_tag_rewrite(
+ sw_if_index=gre_if_11.sw_if_index, vtr_op=L2_VTR_OP.L2_POP_1,
+ push_dot1q=11)
+
+ #
+ # Send traffic in both directions - expect the VLAN tags to
+ # be swapped.
+ #
+ tx = self.create_tunnel_stream_vlano4(self.pg0,
+ "2.2.2.2",
+ self.pg0.local_ip4,
+ 11)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_vlano4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.3",
+ 12)
+
+ tx = self.create_tunnel_stream_vlano4(self.pg0,
+ "2.2.2.3",
+ self.pg0.local_ip4,
+ 12)
+ rx = self.send_and_expect(self.pg0, tx, self.pg0)
+ self.verify_tunneled_vlano4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.2",
+ 11)
+
+ #
+ # Cleanup Test resources
+ #
+ gre_if_11.remove_vpp_config()
+ gre_if_12.remove_vpp_config()
+ gre_if1.remove_vpp_config()
+ gre_if2.remove_vpp_config()
+ route_tun1_dst.remove_vpp_config()
+ route_tun2_dst.remove_vpp_config()
+
+ def test_gre_loop(self):
+ """ GRE tunnel loop Tests """
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+ # - assign an IP Address
+ #
+ gre_if = VppGreInterface(self,
+ self.pg0.local_ip4,
+ "1.1.1.2")
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ #
+ # add a route to the tunnel's destination that points
+ # through the tunnel, hence forming a loop in the forwarding
+ # graph
+ #
+ route_dst = VppIpRoute(self, "1.1.1.2", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route_dst.add_vpp_config()
+
+ #
+ # packets to the tunnel's destination should be dropped
+ #
+ tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "1.1.1.2")
+ self.send_and_assert_no_replies(self.pg2, tx)
+
+ self.logger.info(self.vapi.ppcli("sh adj 7"))
+
+ #
+ # break the loop
+ #
+ route_dst.modify([VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_dst.add_vpp_config()
+
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+
+ #
+ # a good route through the tunnel to check that it restacked
+ #
+ route_via_tun_2 = VppIpRoute(self, "2.2.2.2", 32,
+ [VppRoutePath("0.0.0.0",
+ gre_if.sw_if_index)])
+ route_via_tun_2.add_vpp_config()
+
+ tx = self.create_stream_ip4(self.pg0, "2.2.2.3", "2.2.2.2")
+ rx = self.send_and_expect(self.pg0, tx, self.pg1)
+ self.verify_tunneled_4o4(self.pg1, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # cleanup
+ #
+ route_via_tun_2.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ def test_mgre(self):
+ """ mGRE IPv4 tunnel Tests """
+
+ for itf in self.pg_interfaces[3:]:
+ #
+ # one underlay nh for each overlay/tunnel peer
+ #
+ itf.generate_remote_hosts(4)
+ itf.configure_ipv4_neighbors()
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+ # - assign an IP Address
+ # - Add a route via the tunnel
+ #
+ gre_if = VppGreInterface(self,
+ itf.local_ip4,
+ "0.0.0.0",
+ mode=(VppEnum.vl_api_tunnel_mode_t.
+ TUNNEL_API_MODE_MP))
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+ gre_if.generate_remote_hosts(4)
+
+ self.logger.info(self.vapi.cli("sh adj"))
+ self.logger.info(self.vapi.cli("sh ip fib"))
+
+ #
+ # ensure we don't match to the tunnel if the source address
+ # is all zeros
+ #
+ tx = self.create_tunnel_stream_4o4(self.pg0,
+ "0.0.0.0",
+ itf.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.send_and_assert_no_replies(self.pg0, tx)
+
+ #
+ # for-each peer
+ #
+ for ii in range(1, 4):
+ route_addr = "4.4.4.%d" % ii
+ tx_e = self.create_stream_ip4(self.pg0, "5.5.5.5", route_addr)
+
+ #
+ # route traffic via the peer
+ #
+ route_via_tun = VppIpRoute(
+ self, route_addr, 32,
+ [VppRoutePath(gre_if._remote_hosts[ii].ip4,
+ gre_if.sw_if_index)])
+ route_via_tun.add_vpp_config()
+
+ # all packets dropped at this point
+ rx = self.send_and_assert_no_replies(self.pg0, tx_e)
+
+ gre_if.admin_down()
+ gre_if.admin_up()
+ rx = self.send_and_assert_no_replies(self.pg0, tx_e)
+
+ #
+ # Add a TEIB entry that resolves the peer
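+ # (the TEIB, Tunnel Endpoint Information Base, maps the overlay
+ # peer address to its underlay next-hop)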
+ #
+ teib = VppTeib(self, gre_if,
+ gre_if._remote_hosts[ii].ip4,
+ itf._remote_hosts[ii].ip4)
+ teib.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - packets are GRE encapped
+ #
+ rx = self.send_and_expect(self.pg0, tx_e, itf)
+ self.verify_tunneled_4o4(self.pg0, rx, tx_e,
+ itf.local_ip4,
+ itf._remote_hosts[ii].ip4)
+
+ tx_i = self.create_tunnel_stream_4o4(self.pg0,
+ itf._remote_hosts[ii].ip4,
+ itf.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ rx = self.send_and_expect(self.pg0, tx_i, self.pg0)
+ self.verify_decapped_4o4(self.pg0, rx, tx_i)
+
+ #
+ # delete and re-add the TEIB
+ #
+ teib.remove_vpp_config()
+ self.send_and_assert_no_replies(self.pg0, tx_e)
+ self.send_and_assert_no_replies(self.pg0, tx_i)
+
+ teib.add_vpp_config()
+ rx = self.send_and_expect(self.pg0, tx_e, itf)
+ self.verify_tunneled_4o4(self.pg0, rx, tx_e,
+ itf.local_ip4,
+ itf._remote_hosts[ii].ip4)
+ rx = self.send_and_expect(self.pg0, tx_i, self.pg0)
+ self.verify_decapped_4o4(self.pg0, rx, tx_i)
+
+ #
+ # bounce the interface state and try packets again
+ #
+ gre_if.admin_down()
+ gre_if.admin_up()
+ rx = self.send_and_expect(self.pg0, tx_e, itf)
+ self.verify_tunneled_4o4(self.pg0, rx, tx_e,
+ itf.local_ip4,
+ itf._remote_hosts[ii].ip4)
+ rx = self.send_and_expect(self.pg0, tx_i, self.pg0)
+ self.verify_decapped_4o4(self.pg0, rx, tx_i)
+
+ gre_if.admin_down()
+ gre_if.unconfig_ip4()
+
+ def test_mgre6(self):
+ """ mGRE IPv6 tunnel Tests """
+
+ self.pg0.config_ip6()
+ self.pg0.resolve_ndp()
+
+ e = VppEnum.vl_api_tunnel_encap_decap_flags_t
+
+ for itf in self.pg_interfaces[3:]:
+ #
+ # one underlay nh for each overlay/tunnel peer
+ #
+ itf.config_ip6()
+ itf.generate_remote_hosts(4)
+ itf.configure_ipv6_neighbors()
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+ # - assign an IP Address
+ # - Add a route via the tunnel
+ #
+ gre_if = VppGreInterface(
+ self,
+ itf.local_ip6,
+ "::",
+ mode=(VppEnum.vl_api_tunnel_mode_t.
+ TUNNEL_API_MODE_MP),
+ flags=e.TUNNEL_API_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP)
+
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip6()
+ gre_if.generate_remote_hosts(4)
+
+ #
+ # for-each peer
+ #
+ for ii in range(1, 4):
+ route_addr = "4::%d" % ii
+
+ #
+ # Add a TEIB entry that resolves the peer
+ #
+ teib = VppTeib(self, gre_if,
+ gre_if._remote_hosts[ii].ip6,
+ itf._remote_hosts[ii].ip6)
+ teib.add_vpp_config()
+
+ #
+ # route traffic via the peer
+ #
+ route_via_tun = VppIpRoute(
+ self, route_addr, 128,
+ [VppRoutePath(gre_if._remote_hosts[ii].ip6,
+ gre_if.sw_if_index)])
+ route_via_tun.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - packets are GRE encapped
+ #
+ tx_e = self.create_stream_ip6(self.pg0, "5::5", route_addr,
+ dscp=2, ecn=1)
+ rx = self.send_and_expect(self.pg0, tx_e, itf)
+ self.verify_tunneled_6o6(self.pg0, rx, tx_e,
+ itf.local_ip6,
+ itf._remote_hosts[ii].ip6,
+ dscp=2)
+ tx_i = self.create_tunnel_stream_6o6(self.pg0,
+ itf._remote_hosts[ii].ip6,
+ itf.local_ip6,
+ self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+ rx = self.send_and_expect(self.pg0, tx_i, self.pg0)
+ self.verify_decapped_6o6(self.pg0, rx, tx_i)
+
+ #
+ # delete and re-add the TEIB
+ #
+ teib.remove_vpp_config()
+ self.send_and_assert_no_replies(self.pg0, tx_e)
+
+ teib.add_vpp_config()
+ rx = self.send_and_expect(self.pg0, tx_e, itf)
+ self.verify_tunneled_6o6(self.pg0, rx, tx_e,
+ itf.local_ip6,
+ itf._remote_hosts[ii].ip6,
+ dscp=2)
+ rx = self.send_and_expect(self.pg0, tx_i, self.pg0)
+ self.verify_decapped_6o6(self.pg0, rx, tx_i)
+
+ gre_if.admin_down()
+ gre_if.unconfig_ip6()
+ itf.unconfig_ip6()
+ self.pg0.unconfig_ip6()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_gro.py b/test/test_gro.py
new file mode 100644
index 00000000000..33215d65fa7
--- /dev/null
+++ b/test/test_gro.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+"""GRO functional tests"""
+
+#
+# Add tests for:
+# - GRO
+# - Verify that 1500-byte frames are forwarded correctly with GRO disabled
+# - Verify that 1500-byte frames are coalesced correctly with GRO enabled
+#
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.inet6 import IPv6, Ether, IP, UDP, ICMPv6PacketTooBig
+from scapy.layers.inet6 import ipv6nh, IPerror6
+from scapy.layers.inet import TCP, ICMP
+from scapy.data import ETH_P_IP, ETH_P_IPV6, ETH_P_ARP
+
+from framework import VppTestCase, VppTestRunner
+from vpp_object import VppObject
+from vpp_interface import VppInterface
+
+
+""" Test_gro is a subclass of VPPTestCase classes.
+ GRO tests.
+"""
+
+
+class TestGRO(VppTestCase):
+ """ GRO Test Case """
+
+ @classmethod
+ def setUpClass(self):
+ super(TestGRO, self).setUpClass()
+ res = self.create_pg_interfaces(range(2))
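+ # pg2 and pg3 are created with the extra arguments, assumed to be the
+ # GSO enable flag and the gso/coalesce size (1460 and 8940 bytes)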
+ res_gro = self.create_pg_interfaces(range(2, 3), 1, 1460)
+ self.create_pg_interfaces(range(3, 4), 1, 8940)
+ self.pg_interfaces.append(res[0])
+ self.pg_interfaces.append(res[1])
+ self.pg_interfaces.append(res_gro[0])
+ self.pg2.coalesce_enable()
+ self.pg3.coalesce_enable()
+
+ @classmethod
+ def tearDownClass(self):
+ super(TestGRO, self).tearDownClass()
+
+ def setUp(self):
+ super(TestGRO, self).setUp()
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ super(TestGRO, self).tearDown()
+ if not self.vpp_dead:
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+
+ def test_gro(self):
+ """ GRO test """
+
+ n_packets = 124
+ #
+ # Send 1500-byte frames with GRO disabled
+ #
+ p4 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=4321) /
+ Raw(b'\xa5' * 1460))
+
+ rxs = self.send_and_expect(self.pg0, n_packets * p4, self.pg1)
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg1.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 4321)
+
+ #
+ # Send 1500-byte frames with GRO enabled; the
+ # output interface (pg2) supports GRO
+ #
+ p = []
+ s = 0
+ for n in range(0, n_packets):
+ p.append((Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg2.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=4321, seq=s, ack=n, flags='A') /
+ Raw(b'\xa5' * 1460)))
+ s += 1460
+
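+ # with the 64 KB coalesce limit each output packet can carry at most
+ # 44 segments (44 * 1460 + 40 = 64280 < 65536), so the 124 segments
+ # sent should yield 2 full coalesced packets; the remaining 36
+ # segments stay buffered until flushed (see the FIN exchange below)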
+ rxs = self.send_and_expect(self.pg0, p, self.pg2, n_rx=2)
+
+ i = 0
+ for rx in rxs:
+ i += 1
+ self.assertEqual(rx[Ether].src, self.pg2.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg2.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].len, 64280) # 1460 * 44 + 40 < 65536
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 4321)
+ self.assertEqual(rx[TCP].ack, (44*i - 1))
+
+ p4_temp = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_ip4, dst=self.pg0.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=4321, flags='F'))
+
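+ # the FIN segments presumably cause GRO to flush the flow it is still
+ # buffering on pg2, hence the single extra 52600-byte packet
+ # (36 * 1460 + 40) captured below alongside the 100 forwarded FINs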
+ rxs = self.send_and_expect(self.pg2, 100*[p4_temp], self.pg0, n_rx=100)
+ rx_coalesce = self.pg2.get_capture(1, timeout=1)
+
+ rx0 = rx_coalesce[0]
+ self.assertEqual(rx0[Ether].src, self.pg2.local_mac)
+ self.assertEqual(rx0[Ether].dst, self.pg2.remote_mac)
+ self.assertEqual(rx0[IP].src, self.pg0.remote_ip4)
+ self.assertEqual(rx0[IP].dst, self.pg2.remote_ip4)
+ self.assertEqual(rx0[IP].len, 52600) # 1460 * 36 + 40
+ self.assertEqual(rx0[TCP].sport, 1234)
+ self.assertEqual(rx0[TCP].dport, 4321)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].len, 40)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 4321)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_gso.py b/test/test_gso.py
new file mode 100644
index 00000000000..094600eb74c
--- /dev/null
+++ b/test/test_gso.py
@@ -0,0 +1,722 @@
+#!/usr/bin/env python3
+"""GSO functional tests"""
+
+#
+# Add tests for:
+# - GSO
+# - Verify that jumbo frames are handled correctly with GSO disabled
+# - Verify that jumbo frames are handled correctly with GSO enabled
+# - Verify that jumbo frames are handled correctly with GSO enabled only on
+#   the ingress interface
+#
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.inet6 import IPv6, Ether, IP, UDP, ICMPv6PacketTooBig
+from scapy.layers.inet6 import ipv6nh, IPerror6
+from scapy.layers.inet import TCP, ICMP
+from scapy.layers.vxlan import VXLAN
+from scapy.data import ETH_P_IP, ETH_P_IPV6, ETH_P_ARP
+
+from framework import VppTestCase, VppTestRunner
+from vpp_object import VppObject
+from vpp_interface import VppInterface
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto
+from vpp_ipip_tun_interface import VppIpIpTunInterface
+from vpp_vxlan_tunnel import VppVxlanTunnel
+from socket import AF_INET, AF_INET6, inet_pton
+from util import reassemble4
+
+
+""" Test_gso is a subclass of VPPTestCase classes.
+ GSO tests.
+"""
+
+
+class TestGSO(VppTestCase):
+ """ GSO Test Case """
+
+ def __init__(self, *args):
+ VppTestCase.__init__(self, *args)
+
+ @classmethod
+ def setUpClass(self):
+ super(TestGSO, self).setUpClass()
+ res = self.create_pg_interfaces(range(2))
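+ # pg2/pg3 and pg4 are created with the extra arguments, assumed to be
+ # the GSO enable flag and gso_size (1460 and 8940 bytes respectively)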
+ res_gso = self.create_pg_interfaces(range(2, 4), 1, 1460)
+ self.create_pg_interfaces(range(4, 5), 1, 8940)
+ self.pg_interfaces.append(res[0])
+ self.pg_interfaces.append(res[1])
+ self.pg_interfaces.append(res_gso[0])
+ self.pg_interfaces.append(res_gso[1])
+
+ @classmethod
+ def tearDownClass(self):
+ super(TestGSO, self).tearDownClass()
+
+ def setUp(self):
+ super(TestGSO, self).setUp()
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+
+ self.single_tunnel_bd = 10
+ self.vxlan = VppVxlanTunnel(self, src=self.pg0.local_ip4,
+ dst=self.pg0.remote_ip4,
+ vni=self.single_tunnel_bd)
+
+ self.vxlan2 = VppVxlanTunnel(self, src=self.pg0.local_ip6,
+ dst=self.pg0.remote_ip6,
+ vni=self.single_tunnel_bd)
+
+ self.ipip4 = VppIpIpTunInterface(self, self.pg0, self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.ipip6 = VppIpIpTunInterface(self, self.pg0, self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+
+ def tearDown(self):
+ super(TestGSO, self).tearDown()
+ if not self.vpp_dead:
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+
+ def test_gso(self):
+ """ GSO test """
+ #
+ # Send a jumbo frame with GSO disabled and the DF bit set
+ #
+ p4 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg0, [p4], self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[ICMP].type, 3) # "dest-unreach"
+ self.assertEqual(rx[ICMP].code, 4) # "fragmentation-needed"
+
+ #
+ # Send checksum offload frames
+ #
+ p40 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_ip4, dst=self.pg0.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 1460))
+
+ rxs = self.send_and_expect(self.pg2, 100*[p40], self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ payload_len = rx[IP].len - 20 - 20
+ self.assert_ip_checksum_valid(rx)
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(payload_len, len(rx[Raw]))
+
+ p60 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IPv6(src=self.pg2.remote_ip6, dst=self.pg0.remote_ip6) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 1440))
+
+ rxs = self.send_and_expect(self.pg2, 100*[p60], self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ payload_len = rx[IPv6].plen - 20
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(payload_len, len(rx[Raw]))
+
+ #
+ # Send a jumbo frame with GSO enabled and the DF bit set;
+ # both input and output interfaces support GSO
+ #
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg3.sw_if_index,
+ enable_disable=1)
+ p41 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 100*[p41], self.pg3, 100)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg3.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg3.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(rx[IP].len, 65240) # 65200 + 20 (IP) + 20 (TCP)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 1234)
+
+ #
+ # ipv6
+ #
+ p61 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IPv6(src=self.pg2.remote_ip6, dst=self.pg3.remote_ip6) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 100*[p61], self.pg3, 100)
+
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg3.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg3.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg3.remote_ip6)
+ self.assertEqual(rx[IPv6].plen, 65220) # 65200 + 20 (TCP)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 1234)
+
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface
+ # and the DF bit set. The GSO packet will be chunked into gso_size
+ # data payloads
+ #
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=1)
+ p42 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_ip4, dst=self.pg0.remote_ip4,
+ flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
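+ # each 65200-byte payload should be chunked into ceil(65200 / 1460) = 45
+ # MSS-sized segments, i.e. 5 * 45 = 225 packets expected out of pg0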
+ rxs = self.send_and_expect(self.pg2, 5*[p42], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ payload_len = rx[IP].len - 20 - 20 # len - 20 (IP4) - 20 (TCP)
+ self.assert_ip_checksum_valid(rx)
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 1234)
+ self.assertEqual(payload_len, len(rx[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # ipv6
+ #
+ p62 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IPv6(src=self.pg2.remote_ip6, dst=self.pg0.remote_ip6) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p62], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ payload_len = rx[IPv6].plen - 20
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(rx[TCP].sport, 1234)
+ self.assertEqual(rx[TCP].dport, 1234)
+ self.assertEqual(payload_len, len(rx[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface
+ # and the DF bit unset. The GSO packet will be fragmented.
+ #
+ self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, [576, 0, 0, 0])
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg1.sw_if_index,
+ enable_disable=1)
+
+ p43 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IP(src=self.pg2.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p43], self.pg1, 5*119)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg1.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg1.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ size += rx[IP].len - 20
+ size -= 20*5 # TCP header
+ self.assertEqual(size, 65200*5)
+
+ #
+ # IPv6
+ # Send a jumbo frame with GSO enabled only on the input interface.
+ # An ICMPv6 Packet Too Big will be sent back to the sender.
+ #
+ self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, [1280, 0, 0, 0])
+ p63 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
+ IPv6(src=self.pg2.remote_ip6, dst=self.pg1.remote_ip6) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p63], self.pg2, 5)
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg2.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg2.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg2.local_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg2.remote_ip6)
+ self.assertEqual(rx[IPv6].plen, 1240) # MTU - IPv6 header
+ self.assertEqual(ipv6nh[rx[IPv6].nh], "ICMPv6")
+ self.assertEqual(rx[ICMPv6PacketTooBig].mtu, 1280)
+ self.assertEqual(rx[IPerror6].src, self.pg2.remote_ip6)
+ self.assertEqual(rx[IPerror6].dst, self.pg1.remote_ip6)
+ self.assertEqual(rx[IPerror6].plen - 20, 65200)
+
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface,
+ # a 9K MTU and the DF bit unset. The GSO packet will be segmented.
+ # MSS is 8960, but the GSO size is capped by the vlib_buffer_t data
+ # size: min(MSS, 2048 - 14 - 20)
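+ # (e.g. min(8960, 2048 - 14 - 20) = 2014 bytes per segment, so each
+ # 65200-byte payload should become ceil(65200 / 2014) = 33 packets;
+ # the 5 frames sent below are then expected as 165 packets)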
+ #
+ self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, [9000, 0, 0, 0])
+ self.vapi.sw_interface_set_mtu(self.pg4.sw_if_index, [9000, 0, 0, 0])
+ p44 = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
+ IP(src=self.pg4.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg4, 5*[p44], self.pg1, 165)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg1.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg4.remote_ip4)
+ self.assertEqual(rx[IP].dst, self.pg1.remote_ip4)
+ payload_len = rx[IP].len - 20 - 20 # len - 20 (IP4) - 20 (TCP)
+ self.assert_ip_checksum_valid(rx)
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(payload_len, len(rx[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # IPv6
+ #
+ p64 = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
+ IPv6(src=self.pg4.remote_ip6, dst=self.pg1.remote_ip6) /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg4, 5*[p64], self.pg1, 170)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg1.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg1.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg4.remote_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg1.remote_ip6)
+ payload_len = rx[IPv6].plen - 20
+ self.assert_tcp_checksum_valid(rx)
+ self.assertEqual(payload_len, len(rx[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=0)
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg1.sw_if_index,
+ enable_disable=0)
+
+ def test_gso_vxlan(self):
+ """ GSO VXLAN test """
+ self.logger.info(self.vapi.cli("sh int addr"))
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface;
+ # create a VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg2
+ # into a BD.
+ #
+
+ #
+ # enable ipv4/vxlan
+ #
+ self.vxlan.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.vxlan.sw_if_index, bd_id=self.single_tunnel_bd)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg2.sw_if_index, bd_id=self.single_tunnel_bd)
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=1)
+
+ #
+ # IPv4/IPv4 - VXLAN
+ #
+ p45 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IP(src=self.pg2.remote_ip4, dst="172.16.3.3", flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p45], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
+ self.assertEqual(rx[VXLAN].vni, 10)
+ inner = rx[VXLAN].payload
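+ # the outer IPv4 length minus the IPv4 (20), UDP (8) and VXLAN (8)
+ # headers should equal the inner Ethernet frame length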
+ self.assertEqual(rx[IP].len - 20 - 8 - 8, len(inner))
+ self.assertEqual(inner[Ether].src, self.pg2.remote_mac)
+ self.assertEqual(inner[Ether].dst, "02:fe:60:1e:a2:79")
+ self.assertEqual(inner[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(inner[IP].dst, "172.16.3.3")
+ self.assert_ip_checksum_valid(inner)
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IP].len - 20 - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # IPv4/IPv6 - VXLAN
+ #
+ p65 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IPv6(src=self.pg2.remote_ip6, dst="fd01:3::3") /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p65], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
+ self.assertEqual(rx[VXLAN].vni, 10)
+ inner = rx[VXLAN].payload
+ self.assertEqual(rx[IP].len - 20 - 8 - 8, len(inner))
+ self.assertEqual(inner[Ether].src, self.pg2.remote_mac)
+ self.assertEqual(inner[Ether].dst, "02:fe:60:1e:a2:79")
+ self.assertEqual(inner[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(inner[IPv6].dst, "fd01:3::3")
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IPv6].plen - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # disable ipv4/vxlan
+ #
+ self.vxlan.remove_vpp_config()
+
+ #
+ # enable ipv6/vxlan
+ #
+ self.vxlan2.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.vxlan2.sw_if_index,
+ bd_id=self.single_tunnel_bd)
+
+ #
+ # IPv6/IPv4 - VXLAN
+ #
+ p46 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IP(src=self.pg2.remote_ip4, dst="172.16.3.3", flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p46], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
+ self.assertEqual(rx[VXLAN].vni, 10)
+ inner = rx[VXLAN].payload
+ self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
+ self.assertEqual(inner[Ether].src, self.pg2.remote_mac)
+ self.assertEqual(inner[Ether].dst, "02:fe:60:1e:a2:79")
+ self.assertEqual(inner[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(inner[IP].dst, "172.16.3.3")
+ self.assert_ip_checksum_valid(inner)
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IP].len - 20 - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # IPv6/IPv6 - VXLAN
+ #
+ p66 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IPv6(src=self.pg2.remote_ip6, dst="fd01:3::3") /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p66], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
+ self.assertEqual(rx[VXLAN].vni, 10)
+ inner = rx[VXLAN].payload
+ self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
+ self.assertEqual(inner[Ether].src, self.pg2.remote_mac)
+ self.assertEqual(inner[Ether].dst, "02:fe:60:1e:a2:79")
+ self.assertEqual(inner[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(inner[IPv6].dst, "fd01:3::3")
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IPv6].plen - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # disable ipv6/vxlan
+ #
+ self.vxlan2.remove_vpp_config()
+
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=0)
+
+ def test_gso_ipip(self):
+ """ GSO IPIP test """
+ self.logger.info(self.vapi.cli("sh int addr"))
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface;
+ # create an IPIP tunnel on VPP pg0.
+ #
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=1)
+
+ #
+ # enable ipip4
+ #
+ self.ipip4.add_vpp_config()
+
+ # Set interface up and enable IP on it
+ self.ipip4.admin_up()
+ self.ipip4.set_unnumbered(self.pg0.sw_if_index)
+
+ # Add IPv4 routes via tunnel interface
+ self.ip4_via_ip4_tunnel = VppIpRoute(
+ self, "172.16.10.0", 24,
+ [VppRoutePath("0.0.0.0",
+ self.ipip4.sw_if_index,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
+ self.ip4_via_ip4_tunnel.add_vpp_config()
+
+ #
+ # IPv4/IPv4 - IPIP
+ #
+ p47 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IP(src=self.pg2.remote_ip4, dst="172.16.10.3", flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p47], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ self.assertEqual(rx[IP].proto, 4) # ipencap
+ inner = rx[IP].payload
+ self.assertEqual(rx[IP].len - 20, len(inner))
+ self.assertEqual(inner[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(inner[IP].dst, "172.16.10.3")
+ self.assert_ip_checksum_valid(inner)
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IP].len - 20 - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ self.ip6_via_ip4_tunnel = VppIpRoute(
+ self, "fd01:10::", 64,
+ [VppRoutePath("::",
+ self.ipip4.sw_if_index,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
+ self.ip6_via_ip4_tunnel.add_vpp_config()
+ #
+ # IPv4/IPv6 - IPIP
+ #
+ p67 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IPv6(src=self.pg2.remote_ip6, dst="fd01:10::3") /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p67], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ self.assertEqual(rx[IP].proto, 41) # ipv6
+ inner = rx[IP].payload
+ self.assertEqual(rx[IP].len - 20, len(inner))
+ self.assertEqual(inner[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(inner[IPv6].dst, "fd01:10::3")
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IPv6].plen - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # Send a jumbo frame with GSO enabled only on the input interface;
+ # create an IPIP tunnel on VPP pg0 and enable the gso feature node
+ # on the ipip tunnel - the IPsec use case
+ #
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=0)
+ self.vapi.feature_gso_enable_disable(
+ sw_if_index=self.ipip4.sw_if_index,
+ enable_disable=1)
+
+ rxs = self.send_and_expect(self.pg2, 5*[p47], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assert_ip_checksum_valid(rx)
+ self.assertEqual(rx[IP].proto, 4) # ipencap
+ inner = rx[IP].payload
+ self.assertEqual(rx[IP].len - 20, len(inner))
+ self.assertEqual(inner[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(inner[IP].dst, "172.16.10.3")
+ self.assert_ip_checksum_valid(inner)
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IP].len - 20 - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # disable ipip4
+ #
+ self.vapi.feature_gso_enable_disable(
+ sw_if_index=self.ipip4.sw_if_index,
+ enable_disable=0)
+ self.ip4_via_ip4_tunnel.remove_vpp_config()
+ self.ip6_via_ip4_tunnel.remove_vpp_config()
+ self.ipip4.remove_vpp_config()
+
+ #
+ # enable ipip6
+ #
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=1)
+ self.ipip6.add_vpp_config()
+
+ # Set interface up and enable IP on it
+ self.ipip6.admin_up()
+ self.ipip6.set_unnumbered(self.pg0.sw_if_index)
+
+ # Add IPv4 routes via tunnel interface
+ self.ip4_via_ip6_tunnel = VppIpRoute(
+ self, "172.16.10.0", 24,
+ [VppRoutePath("0.0.0.0",
+ self.ipip6.sw_if_index,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP4)])
+ self.ip4_via_ip6_tunnel.add_vpp_config()
+
+ #
+ # IPv6/IPv4 - IPIP
+ #
+ p48 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IP(src=self.pg2.remote_ip4, dst="172.16.10.3", flags='DF') /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p48], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ self.assertEqual(ipv6nh[rx[IPv6].nh], "IP")
+ inner = rx[IPv6].payload
+ self.assertEqual(rx[IPv6].plen, len(inner))
+ self.assertEqual(inner[IP].src, self.pg2.remote_ip4)
+ self.assertEqual(inner[IP].dst, "172.16.10.3")
+ self.assert_ip_checksum_valid(inner)
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IP].len - 20 - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ self.ip6_via_ip6_tunnel = VppIpRoute(
+ self, "fd01:10::", 64,
+ [VppRoutePath("::",
+ self.ipip6.sw_if_index,
+ proto=FibPathProto.FIB_PATH_NH_PROTO_IP6)])
+ self.ip6_via_ip6_tunnel.add_vpp_config()
+
+ #
+ # IPv6/IPv6 - IPIP
+ #
+ p68 = (Ether(src=self.pg2.remote_mac, dst="02:fe:60:1e:a2:79") /
+ IPv6(src=self.pg2.remote_ip6, dst="fd01:10::3") /
+ TCP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 65200))
+
+ rxs = self.send_and_expect(self.pg2, 5*[p68], self.pg0, 225)
+ size = 0
+ for rx in rxs:
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
+ self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
+ self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
+ self.assertEqual(ipv6nh[rx[IPv6].nh], "IPv6")
+ inner = rx[IPv6].payload
+ self.assertEqual(rx[IPv6].plen, len(inner))
+ self.assertEqual(inner[IPv6].src, self.pg2.remote_ip6)
+ self.assertEqual(inner[IPv6].dst, "fd01:10::3")
+ self.assert_tcp_checksum_valid(inner)
+ payload_len = inner[IPv6].plen - 20
+ self.assertEqual(payload_len, len(inner[Raw]))
+ size += payload_len
+ self.assertEqual(size, 65200*5)
+
+ #
+ # disable ipip6
+ #
+ self.ip4_via_ip6_tunnel.remove_vpp_config()
+ self.ip6_via_ip6_tunnel.remove_vpp_config()
+ self.ipip6.remove_vpp_config()
+
+ self.vapi.feature_gso_enable_disable(sw_if_index=self.pg0.sw_if_index,
+ enable_disable=0)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_gtpu.py b/test/test_gtpu.py
new file mode 100644
index 00000000000..791067c0633
--- /dev/null
+++ b/test/test_gtpu.py
@@ -0,0 +1,413 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range
+import unittest
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.gtp import GTP_U_Header
+from scapy.utils import atol
+
+import util
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+
+@tag_fixme_vpp_workers
+class TestGtpuUDP(VppTestCase):
+ """ GTPU UDP ports Test Case """
+
+ def setUp(self):
+ super(TestGtpuUDP, self).setUp()
+
+ self.dport = 2152
+
+ self.ip4_err = 0
+ self.ip6_err = 0
+
+ self.create_pg_interfaces(range(1))
+ for pg in self.pg_interfaces:
+ pg.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.config_ip6()
+
+ def _check_udp_port_ip4(self, enabled=True):
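+ # send a UDP packet to the GTP-U port and read the "No listener for
+ # dst port" error counter: it should only increase while no GTPU
+ # tunnel has the port registered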
+
+ pkt = (Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0))
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+
+ err = self.statistics.get_counter(
+ '/err/ip4-udp-lookup/No listener for dst port')[0]
+
+ if enabled:
+ self.assertEqual(err, self.ip4_err)
+ else:
+ self.assertEqual(err, self.ip4_err + 1)
+
+ self.ip4_err = err
+
+ def _check_udp_port_ip6(self, enabled=True):
+
+ pkt = (Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0))
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+
+ err = self.statistics.get_counter(
+ '/err/ip6-udp-lookup/No listener for dst port')[0]
+
+ if enabled:
+ self.assertEqual(err, self.ip6_err)
+ else:
+ self.assertEqual(err, self.ip6_err + 1)
+
+ self.ip6_err = err
+
+ def test_udp_port(self):
+ """ test UDP ports
+ Check that there are no UDP listeners before gtpu is enabled
+ """
+ # UDP ports should be disabled unless a tunnel is configured
+ self._check_udp_port_ip4(False)
+ self._check_udp_port_ip6(False)
+
+ r = self.vapi.gtpu_add_del_tunnel(is_add=True,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=self.pg0.local_ip4,
+ dst_address=self.pg0.remote_ip4)
+
+ # UDP port 2152 enabled for ip4
+ self._check_udp_port_ip4()
+
+ r = self.vapi.gtpu_add_del_tunnel(is_add=True,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=self.pg0.local_ip6,
+ dst_address=self.pg0.remote_ip6)
+
+ # UDP port 2152 enabled for ip6
+ self._check_udp_port_ip6()
+
+ r = self.vapi.gtpu_add_del_tunnel(is_add=False,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=self.pg0.local_ip4,
+ dst_address=self.pg0.remote_ip4)
+
+ r = self.vapi.gtpu_add_del_tunnel(is_add=False,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=self.pg0.local_ip6,
+ dst_address=self.pg0.remote_ip6)
+
+
+class TestGtpu(BridgeDomain, VppTestCase):
+ """ GTPU Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding GTPU header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150) /
+ pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+ Encapsulate the original payload frame by adding GTPU header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac) /
+ IP(src=src_ip, dst=self.mcast_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ GTP_U_Header(teid=vni, gtp_type=self.gtp_type, length=150) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing GTPU header
+ """
+ return pkt[GTP_U_Header].payload
+
+ # Method for checking GTPU encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify GTPU tunnel source IP is VPP_IP and destination IP is MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+ # Verify UDP destination port is GTPU 2152, source UDP port could be
+ # arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # Verify teid
+ self.assertEqual(pkt[GTP_U_Header].teid, vni)
+
+ def test_encap(self):
+ """ Encapsulation test
+ Send frames from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg1.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture(1)
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ # payload = self.decapsulate(pkt)
+ # self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+ Send frames from pg3
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg3.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Get packet from each tunnel and assert it's correctly encapsulated.
+ out = self.pg0.get_capture(self.n_ucast_tunnels)
+ for pkt in out:
+ self.check_encapsulation(pkt, self.ucast_flood_bd, True)
+ # payload = self.decapsulate(pkt)
+ # self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_mcast_flood(self):
+ """ Multicast flood test
+ Send frames from pg2
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg2.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture(1)
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.mcast_flood_bd,
+ local_only=False, mcast_pkt=True)
+
+ # payload = self.decapsulate(pkt)
+ # self.assert_eq_pkts(payload, self.frame_reply)
+
+ @classmethod
+ def create_gtpu_flood_test_bd(cls, teid, n_ucast_tunnels):
+ # Create n_ucast_tunnels unicast gtpu tunnels under the bd
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
+ # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ r = cls.vapi.gtpu_add_del_tunnel(
+ is_add=True,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=cls.pg0.local_ip4,
+ dst_address=dest_ip4,
+ teid=teid)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=teid)
+
+ @classmethod
+ def add_del_shared_mcast_dst_load(cls, is_add):
+ """
+ add or del tunnels sharing the same mcast dst
+ to test gtpu ref_count mechanism
+ """
+ n_shared_dst_tunnels = 20
+ teid_start = 1000
+ teid_end = teid_start + n_shared_dst_tunnels
+ for teid in range(teid_start, teid_end):
+ r = cls.vapi.gtpu_add_del_tunnel(
+ decap_next_index=0xFFFFFFFF,
+ src_address=cls.pg0.local_ip4,
+ dst_address=cls.mcast_ip4,
+ mcast_sw_if_index=1,
+ teid=teid,
+ is_add=is_add)
+ if r.sw_if_index == 0xffffffff:
+ raise ValueError("bad sw_if_index: ~0")
+
+ @classmethod
+ def add_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=1)
+
+ @classmethod
+ def del_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=0)
+
+ @classmethod
+ def add_del_mcast_tunnels_load(cls, is_add):
+ """
+ add or del tunnels to test gtpu stability
+ """
+ n_distinct_dst_tunnels = 20
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_distinct_dst_tunnels
+ for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
+ ip_range_end):
+ teid = int(dest_ip4.split('.')[3])
+ cls.vapi.gtpu_add_del_tunnel(
+ decap_next_index=0xFFFFFFFF,
+ src_address=cls.pg0.local_ip4,
+ dst_address=dest_ip4,
+ mcast_sw_if_index=1,
+ teid=teid,
+ is_add=is_add)
+
+ @classmethod
+ def add_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=1)
+
+ @classmethod
+ def del_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=0)
+
+ # Class method to start the GTPU test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestGtpu, cls).setUpClass()
+
+ try:
+ cls.dport = 2152
+ cls.gtp_type = 0xff
+
+ # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Our Multicast address
+ cls.mcast_ip4 = '239.1.1.1'
+ cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip4)
+
+ # Create GTPU VTEP on VPP pg0, and put gtpu_tunnel0 and pg1
+ # into BD.
+ cls.single_tunnel_bd = 11
+ cls.single_tunnel_vni = 11
+ r = cls.vapi.gtpu_add_del_tunnel(
+ is_add=True,
+ mcast_sw_if_index=0xFFFFFFFF,
+ decap_next_index=0xFFFFFFFF,
+ src_address=cls.pg0.local_ip4,
+ dst_address=cls.pg0.remote_ip4,
+ teid=cls.single_tunnel_vni)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
+
+ # Setup teid 2 to test multicast flooding
+ cls.n_ucast_tunnels = 10
+ cls.mcast_flood_bd = 12
+ cls.create_gtpu_flood_test_bd(cls.mcast_flood_bd,
+ cls.n_ucast_tunnels)
+ r = cls.vapi.gtpu_add_del_tunnel(
+ is_add=True,
+ src_address=cls.pg0.local_ip4,
+ dst_address=cls.mcast_ip4,
+ mcast_sw_if_index=1,
+ decap_next_index=0xFFFFFFFF,
+ teid=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
+
+ # Add and delete mcast tunnels to check stability
+ cls.add_shared_mcast_dst_load()
+ cls.add_mcast_tunnels_load()
+ cls.del_shared_mcast_dst_load()
+ cls.del_mcast_tunnels_load()
+
+ # Setup teid 3 to test unicast flooding
+ cls.ucast_flood_bd = 13
+ cls.create_gtpu_flood_test_bd(cls.ucast_flood_bd,
+ cls.n_ucast_tunnels)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestGtpu, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestGtpu, cls).tearDownClass()
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+ def tearDown(self):
+ super(TestGtpu, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
+ self.logger.info(self.vapi.cli("show int"))
+ self.logger.info(self.vapi.cli("show gtpu tunnel"))
+ self.logger.info(self.vapi.cli("show trace"))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_igmp.py b/test/test_igmp.py
new file mode 100644
index 00000000000..8053bc3d544
--- /dev/null
+++ b/test/test_igmp.py
@@ -0,0 +1,837 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, IPOption
+from scapy.contrib.igmpv3 import IGMPv3, IGMPv3gr, IGMPv3mq, IGMPv3mr
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from vpp_igmp import find_igmp_state, IGMP_FILTER, IgmpRecord, IGMP_MODE, \
+ IgmpSG, VppHostState, wait_for_igmp_event
+from vpp_ip_route import find_mroute, VppIpTable
+
+
+class IgmpMode:
+ HOST = 1
+ ROUTER = 0
+
+
+@tag_fixme_vpp_workers
+class TestIgmp(VppTestCase):
+ """ IGMP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestIgmp, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestIgmp, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestIgmp, self).setUp()
+
+ self.create_pg_interfaces(range(4))
+ self.sg_list = []
+ self.config_list = []
+
+ self.ip_addr = []
+ self.ip_table = VppIpTable(self, 1)
+ self.ip_table.add_vpp_config()
+
+ for pg in self.pg_interfaces[2:]:
+ pg.set_table_ip4(1)
+ for pg in self.pg_interfaces:
+ pg.admin_up()
+ pg.config_ip4()
+ pg.resolve_arp()
+
+ def tearDown(self):
+ for pg in self.pg_interfaces:
+ self.vapi.igmp_clear_interface(pg.sw_if_index)
+ pg.unconfig_ip4()
+ pg.set_table_ip4(0)
+ pg.admin_down()
+ super(TestIgmp, self).tearDown()
+
+ def send(self, ti, pkts):
+ ti.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ def test_igmp_flush(self):
+ """ IGMP Link Up/down and Flush """
+
+ #
+ # FIX THIS. Link down.
+ #
+
+ def test_igmp_enable(self):
+ """ IGMP enable/disable on an interface
+
+ check for the addition/removal of the IGMP mroutes """
+
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index, 1, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg1.sw_if_index, 1, IGMP_MODE.HOST)
+
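+        # 224.0.0.1 is the all-hosts group and 224.0.0.22 is the IGMPv3
+        # report destination; with IGMP enabled, mroutes for both are
+        # expected, while only the 224.0.0.22 mroute goes away on disable.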
+ self.assertTrue(find_mroute(self, "224.0.0.1", "0.0.0.0", 32))
+ self.assertTrue(find_mroute(self, "224.0.0.22", "0.0.0.0", 32))
+
+ self.vapi.igmp_enable_disable(self.pg2.sw_if_index, 1, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg3.sw_if_index, 1, IGMP_MODE.HOST)
+
+ self.assertTrue(find_mroute(self, "224.0.0.1", "0.0.0.0", 32,
+ table_id=1))
+ self.assertTrue(find_mroute(self, "224.0.0.22", "0.0.0.0", 32,
+ table_id=1))
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index, 0, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg1.sw_if_index, 0, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg2.sw_if_index, 0, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg3.sw_if_index, 0, IGMP_MODE.HOST)
+
+ self.assertTrue(find_mroute(self, "224.0.0.1", "0.0.0.0", 32))
+ self.assertFalse(find_mroute(self, "224.0.0.22", "0.0.0.0", 32))
+ self.assertTrue(find_mroute(self, "224.0.0.1", "0.0.0.0", 32,
+ table_id=1))
+ self.assertFalse(find_mroute(self, "224.0.0.22", "0.0.0.0", 32,
+ table_id=1))
+
+ def verify_general_query(self, p):
+ ip = p[IP]
+ self.assertEqual(len(ip.options), 1)
+ self.assertEqual(ip.options[0].option, 20)
+ self.assertEqual(ip.dst, "224.0.0.1")
+ self.assertEqual(ip.proto, 2)
+ igmp = p[IGMPv3]
+ self.assertEqual(igmp.type, 0x11)
+ self.assertEqual(igmp.gaddr, "0.0.0.0")
+
+ def verify_group_query(self, p, grp, srcs):
+ ip = p[IP]
+ self.assertEqual(ip.dst, grp)
+ self.assertEqual(ip.proto, 2)
+ self.assertEqual(len(ip.options), 1)
+ self.assertEqual(ip.options[0].option, 20)
+ self.assertEqual(ip.proto, 2)
+ igmp = p[IGMPv3]
+ self.assertEqual(igmp.type, 0x11)
+ self.assertEqual(igmp.gaddr, grp)
+
+ def verify_report(self, rx, records):
+ ip = rx[IP]
+ self.assertEqual(rx[IP].dst, "224.0.0.22")
+ self.assertEqual(len(ip.options), 1)
+ self.assertEqual(ip.options[0].option, 20)
+ self.assertEqual(ip.proto, 2)
+ self.assertEqual(IGMPv3.igmpv3types[rx[IGMPv3].type],
+ "Version 3 Membership Report")
+ self.assertEqual(rx[IGMPv3mr].numgrp, len(records))
+
+ received = rx[IGMPv3mr].records
+
+ for ii in range(len(records)):
+ gr = received[ii]
+ r = records[ii]
+ self.assertEqual(IGMPv3gr.igmpv3grtypes[gr.rtype], r.type)
+ self.assertEqual(gr.numsrc, len(r.sg.saddrs))
+ self.assertEqual(gr.maddr, r.sg.gaddr)
+ self.assertEqual(len(gr.srcaddrs), len(r.sg.saddrs))
+
+ self.assertEqual(sorted(gr.srcaddrs),
+ sorted(r.sg.saddrs))
+
+ def add_group(self, itf, sg, n_pkts=2):
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ hs = VppHostState(self,
+ IGMP_FILTER.INCLUDE,
+ itf.sw_if_index,
+ sg)
+ hs.add_vpp_config()
+
+ capture = itf.get_capture(n_pkts, timeout=10)
+
+        # reports are sent twice due to the default robustness value of 2
+        self.verify_report(capture[0],
+                           [IgmpRecord(sg, "Allow New Sources")])
+        self.verify_report(capture[1],
+                           [IgmpRecord(sg, "Allow New Sources")])
+
+ return hs
+
+ def remove_group(self, hs):
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ hs.remove_vpp_config()
+
+ capture = self.pg0.get_capture(1, timeout=10)
+
+ self.verify_report(capture[0],
+ [IgmpRecord(hs.sg, "Block Old Sources")])
+
+ def test_igmp_host(self):
+ """ IGMP Host functions """
+
+ #
+ # Enable interface for host functions
+ #
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index,
+ 1,
+ IGMP_MODE.HOST)
+
+ #
+ # Add one S,G of state and expect a state-change event report
+ # indicating the addition of the S,G
+ #
+ h1 = self.add_group(self.pg0, IgmpSG("239.1.1.1", ["1.1.1.1"]))
+
+ # search for the corresponding state created in VPP
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 1)
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", "1.1.1.1"))
+
+ #
+        # Send a general query (to the all-hosts address, 224.0.0.1)
+        # expect VPP to respond with a membership report.
+        # Pad the query with zeros - some devices out in the wild
+        # do this.
+ #
+ p_g = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='224.0.0.1', tos=0xc0) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="0.0.0.0") /
+ Raw(b'\x00' * 10))
+
+ self.send(self.pg0, p_g)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ #
+ # Group specific query
+ #
+ p_gs = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1"))
+
+ self.send(self.pg0, p_gs)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ #
+ # A group and source specific query, with the source matching
+ # the source VPP has
+ #
+ p_gs1 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1", srcaddrs=["1.1.1.1"]))
+
+ self.send(self.pg0, p_gs1)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ #
+ # A group and source specific query that reports more sources
+ # than the packet actually has.
+ #
+ p_gs2 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1", numsrc=4, srcaddrs=["1.1.1.1"]))
+
+ self.send_and_assert_no_replies(self.pg0, p_gs2, timeout=10)
+
+ #
+ # A group and source specific query, with the source NOT matching
+ # the source VPP has. There should be no response.
+ #
+ p_gs2 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1", srcaddrs=["1.1.1.2"]))
+
+ self.send_and_assert_no_replies(self.pg0, p_gs2, timeout=10)
+
+ #
+ # A group and source specific query, with the multiple sources
+ # one of which matches the source VPP has.
+ # The report should contain only the source VPP has.
+ #
+ p_gs3 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1",
+ srcaddrs=["1.1.1.1", "1.1.1.2", "1.1.1.3"]))
+
+ self.send(self.pg0, p_gs3)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ #
+        # Two source and group specific queries in quick succession; the
+        # first does not include VPP's source, the second does. Then vice-versa.
+ #
+ self.send(self.pg0, [p_gs2, p_gs1])
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ self.send(self.pg0, [p_gs1, p_gs2])
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h1.sg, "Mode Is Include")])
+
+ #
+ # remove state, expect the report for the removal
+ #
+ self.remove_group(h1)
+
+ dump = self.vapi.igmp_dump()
+ self.assertFalse(dump)
+
+ #
+ # A group with multiple sources
+ #
+ h2 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.1",
+ ["1.1.1.1", "1.1.1.2", "1.1.1.3"]))
+
+ # search for the corresponding state created in VPP
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 3)
+ for s in h2.sg.saddrs:
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", s))
+ #
+        # Send a general query (to the all-hosts address, 224.0.0.1)
+        # expect VPP to respond with a membership report with all sources
+ #
+ self.send(self.pg0, p_g)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(h2.sg, "Mode Is Include")])
+
+ #
+ # Group and source specific query; some present some not
+ #
+ p_gs = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='239.1.1.1', tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Membership Query", mrcode=100) /
+ IGMPv3mq(gaddr="239.1.1.1",
+ srcaddrs=["1.1.1.1", "1.1.1.2", "1.1.1.4"]))
+
+ self.send(self.pg0, p_gs)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(
+ IgmpSG('239.1.1.1', ["1.1.1.1", "1.1.1.2"]),
+ "Mode Is Include")])
+
+ #
+ # add loads more groups
+ #
+ h3 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.2",
+ ["2.1.1.1", "2.1.1.2", "2.1.1.3"]))
+ h4 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.3",
+ ["3.1.1.1", "3.1.1.2", "3.1.1.3"]))
+ h5 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.4",
+ ["4.1.1.1", "4.1.1.2", "4.1.1.3"]))
+ h6 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.5",
+ ["5.1.1.1", "5.1.1.2", "5.1.1.3"]))
+ h7 = self.add_group(self.pg0,
+ IgmpSG("239.1.1.6",
+ ["6.1.1.1", "6.1.1.2",
+ "6.1.1.3", "6.1.1.4",
+ "6.1.1.5", "6.1.1.6",
+ "6.1.1.7", "6.1.1.8",
+ "6.1.1.9", "6.1.1.10",
+ "6.1.1.11", "6.1.1.12",
+ "6.1.1.13", "6.1.1.14",
+ "6.1.1.15", "6.1.1.16"]))
+
+ #
+        # General query.
+        # The order in which the groups are reported is not important;
+        # what is checked here is simply the order VPP currently sends.
+ #
+ self.send(self.pg0, p_g)
+
+ capture = self.pg0.get_capture(1, timeout=10)
+
+ self.verify_report(capture[0],
+ [IgmpRecord(h3.sg, "Mode Is Include"),
+ IgmpRecord(h2.sg, "Mode Is Include"),
+ IgmpRecord(h6.sg, "Mode Is Include"),
+ IgmpRecord(h4.sg, "Mode Is Include"),
+ IgmpRecord(h5.sg, "Mode Is Include"),
+ IgmpRecord(h7.sg, "Mode Is Include")])
+
+ #
+ # modify a group to add and remove some sources
+ #
+ h7.sg = IgmpSG("239.1.1.6",
+ ["6.1.1.1", "6.1.1.2",
+ "6.1.1.5", "6.1.1.6",
+ "6.1.1.7", "6.1.1.8",
+ "6.1.1.9", "6.1.1.10",
+ "6.1.1.11", "6.1.1.12",
+ "6.1.1.13", "6.1.1.14",
+ "6.1.1.15", "6.1.1.16",
+ "6.1.1.17", "6.1.1.18"])
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ h7.add_vpp_config()
+
+ capture = self.pg0.get_capture(1, timeout=10)
+ self.verify_report(capture[0],
+ [IgmpRecord(IgmpSG("239.1.1.6",
+ ["6.1.1.17", "6.1.1.18"]),
+ "Allow New Sources"),
+ IgmpRecord(IgmpSG("239.1.1.6",
+ ["6.1.1.3", "6.1.1.4"]),
+ "Block Old Sources")])
+
+ #
+        # Add additional groups with many sources so that each group
+        # consumes the link MTU. We should therefore see multiple state
+        # reports when queried.
+ #
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [560, 0, 0, 0])
+
+ src_list = []
+ for i in range(128):
+ src_list.append("10.1.1.%d" % i)
+
+ h8 = self.add_group(self.pg0,
+ IgmpSG("238.1.1.1", src_list))
+ h9 = self.add_group(self.pg0,
+ IgmpSG("238.1.1.2", src_list))
+
+ self.send(self.pg0, p_g)
+
+ capture = self.pg0.get_capture(4, timeout=10)
+
+ self.verify_report(capture[0],
+ [IgmpRecord(h3.sg, "Mode Is Include"),
+ IgmpRecord(h2.sg, "Mode Is Include"),
+ IgmpRecord(h6.sg, "Mode Is Include"),
+ IgmpRecord(h4.sg, "Mode Is Include"),
+ IgmpRecord(h5.sg, "Mode Is Include")])
+ self.verify_report(capture[1],
+ [IgmpRecord(h8.sg, "Mode Is Include")])
+ self.verify_report(capture[2],
+ [IgmpRecord(h7.sg, "Mode Is Include")])
+ self.verify_report(capture[3],
+ [IgmpRecord(h9.sg, "Mode Is Include")])
+
+ #
+ # drop the MTU further (so a 128 sized group won't fit)
+ #
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [512, 0, 0, 0])
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ h10 = VppHostState(self,
+ IGMP_FILTER.INCLUDE,
+ self.pg0.sw_if_index,
+ IgmpSG("238.1.1.3", src_list))
+ h10.add_vpp_config()
+
+ capture = self.pg0.get_capture(2, timeout=10)
+ # wait for a little bit
+ self.sleep(1)
+
+ #
+ # remove state, expect the report for the removal
+ # the dump should be empty
+ #
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [600, 0, 0, 0])
+ self.remove_group(h8)
+ self.remove_group(h9)
+ self.remove_group(h2)
+ self.remove_group(h3)
+ self.remove_group(h4)
+ self.remove_group(h5)
+ self.remove_group(h6)
+ self.remove_group(h7)
+ self.remove_group(h10)
+
+ self.logger.info(self.vapi.cli("sh igmp config"))
+ self.assertFalse(self.vapi.igmp_dump())
+
+ #
+ # TODO
+ # ADD STATE ON MORE INTERFACES
+ #
+
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index,
+ 0,
+ IGMP_MODE.HOST)
+
+ def test_igmp_router(self):
+ """ IGMP Router Functions """
+
+ #
+ # Drop reports when not enabled
+ #
+ p_j = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Allow New Sources",
+ maddr="239.1.1.1", srcaddrs=["10.1.1.1", "10.1.1.2"]))
+ p_l = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Block Old Sources",
+ maddr="239.1.1.1", srcaddrs=["10.1.1.1", "10.1.1.2"]))
+
+ self.send(self.pg0, p_j)
+ self.assertFalse(self.vapi.igmp_dump())
+
+ #
+ # drop the default timer values so these tests execute in a
+ # reasonable time frame
+ #
+ self.vapi.cli("test igmp timers query 1 src 3 leave 1")
+
+ #
+ # enable router functions on the interface
+ #
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index,
+ 1,
+ IGMP_MODE.ROUTER)
+ self.vapi.want_igmp_events(1)
+
+ #
+ # wait for router to send general query
+ #
+ for ii in range(3):
+ capture = self.pg0.get_capture(1, timeout=2)
+ self.verify_general_query(capture[0])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ #
+ # re-send the report. VPP should now hold state for the new group
+ # VPP sends a notification that a new group has been joined
+ #
+ self.send(self.pg0, p_j)
+
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.1", 1))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 1))
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 2)
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", "10.1.1.1"))
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", "10.1.1.2"))
+
+ #
+ # wait for the per-source timer to expire
+ # the state should be reaped
+ # VPP sends a notification that the group has been left
+ #
+ self.assertTrue(wait_for_igmp_event(self, 4, self.pg0,
+ "239.1.1.1", "10.1.1.1", 0))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 0))
+ self.assertFalse(self.vapi.igmp_dump())
+
+ #
+        # Resend the join. Wait for two queries and then send a current-state
+        # record including all sources. This should reset the expiry time
+        # on the sources, so they will still be present two seconds later.
+        # If the source timer were not refreshed, the state would have
+        # expired after 3 seconds.
+ #
+ self.send(self.pg0, p_j)
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.1", 1))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 1))
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 2)
+
+ capture = self.pg0.get_capture(2, timeout=3)
+ self.verify_general_query(capture[0])
+ self.verify_general_query(capture[1])
+
+ p_cs = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Mode Is Include",
+ maddr="239.1.1.1", srcaddrs=["10.1.1.1", "10.1.1.2"]))
+
+ self.send(self.pg0, p_cs)
+
+ self.sleep(2)
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 2)
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", "10.1.1.1"))
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.1", "10.1.1.2"))
+
+ #
+ # wait for the per-source timer to expire
+ # the state should be reaped
+ #
+ self.assertTrue(wait_for_igmp_event(self, 4, self.pg0,
+ "239.1.1.1", "10.1.1.1", 0))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 0))
+ self.assertFalse(self.vapi.igmp_dump())
+
+ #
+ # resend the join, then a leave. Router sends a group+source
+ # specific query containing both sources
+ #
+ self.send(self.pg0, p_j)
+
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.1", 1))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 1))
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertEqual(len(dump), 2)
+
+ self.send(self.pg0, p_l)
+ capture = self.pg0.get_capture(1, timeout=3)
+ self.verify_group_query(capture[0], "239.1.1.1",
+ ["10.1.1.1", "10.1.1.2"])
+
+ #
+        # the group-specific query drops the timeout to the leave value (1s)
+ #
+ self.assertTrue(wait_for_igmp_event(self, 2, self.pg0,
+ "239.1.1.1", "10.1.1.1", 0))
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.1", "10.1.1.2", 0))
+        self.assertFalse(self.vapi.igmp_dump())
+
+ #
+        # a TO_EX({}) / IS_EX({}) is treated like a (*,G) join
+ #
+ p_j = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Change To Exclude Mode", maddr="239.1.1.2"))
+
+ self.send(self.pg0, p_j)
+
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.2", "0.0.0.0", 1))
+
+ p_j = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Mode Is Exclude", maddr="239.1.1.3"))
+
+ self.send(self.pg0, p_j)
+
+ self.assertTrue(wait_for_igmp_event(self, 1, self.pg0,
+ "239.1.1.3", "0.0.0.0", 1))
+
+ #
+        # An 'Allow New Sources' for {} should be ignored as it should
+        # never be sent.
+ #
+ p_j = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Allow New Sources", maddr="239.1.1.4"))
+
+ self.send(self.pg0, p_j)
+
+ dump = self.vapi.igmp_dump(self.pg0.sw_if_index)
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.2", "0.0.0.0"))
+ self.assertTrue(find_igmp_state(dump, self.pg0,
+ "239.1.1.3", "0.0.0.0"))
+ self.assertFalse(find_igmp_state(dump, self.pg0,
+ "239.1.1.4", "0.0.0.0"))
+
+ #
+ # a TO_IN({}) and IS_IN({}) are treated like a (*,G) leave
+ #
+ self.vapi.cli("set logging class igmp level debug")
+ p_l = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Change To Include Mode", maddr="239.1.1.2"))
+
+ self.send(self.pg0, p_l)
+ self.assertTrue(wait_for_igmp_event(self, 2, self.pg0,
+ "239.1.1.2", "0.0.0.0", 0))
+
+ p_l = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype="Mode Is Include", maddr="239.1.1.3"))
+
+ self.send(self.pg0, p_l)
+
+ self.assertTrue(wait_for_igmp_event(self, 2, self.pg0,
+ "239.1.1.3", "0.0.0.0", 0))
+ self.assertFalse(self.vapi.igmp_dump(self.pg0.sw_if_index))
+
+ #
+ # disable router config
+ #
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index,
+ 0,
+ IGMP_MODE.ROUTER)
+
+ def _create_igmpv3_pck(self, itf, rtype, maddr, srcaddrs):
+ p = (Ether(dst=itf.local_mac, src=itf.remote_mac) /
+ IP(src=itf.remote_ip4, dst="224.0.0.22", tos=0xc0, ttl=1,
+ options=[IPOption(copy_flag=1, optclass="control",
+ option="router_alert")]) /
+ IGMPv3(type="Version 3 Membership Report") /
+ IGMPv3mr(numgrp=1) /
+ IGMPv3gr(rtype=rtype,
+ maddr=maddr, srcaddrs=srcaddrs))
+ return p
+
+ def test_igmp_proxy_device(self):
+ """ IGMP proxy device """
+ self.pg2.admin_down()
+ self.pg2.unconfig_ip4()
+ self.pg2.set_table_ip4(0)
+ self.pg2.config_ip4()
+ self.pg2.admin_up()
+
+ self.vapi.cli('test igmp timers query 10 src 3 leave 1')
+
+ # enable IGMP
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index, 1, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg1.sw_if_index, 1,
+ IGMP_MODE.ROUTER)
+ self.vapi.igmp_enable_disable(self.pg2.sw_if_index, 1,
+ IGMP_MODE.ROUTER)
+
+ # create IGMP proxy device
+ self.vapi.igmp_proxy_device_add_del(0, self.pg0.sw_if_index, 1)
+ self.vapi.igmp_proxy_device_add_del_interface(0,
+ self.pg1.sw_if_index, 1)
+ self.vapi.igmp_proxy_device_add_del_interface(0,
+ self.pg2.sw_if_index, 1)
+
+ # send join on pg1. join should be proxied by pg0
+ p_j = self._create_igmpv3_pck(self.pg1, "Allow New Sources",
+ "239.1.1.1", ["10.1.1.1", "10.1.1.2"])
+ self.send(self.pg1, p_j)
+
+ capture = self.pg0.get_capture(1, timeout=1)
+ self.verify_report(capture[0], [IgmpRecord(IgmpSG("239.1.1.1",
+ ["10.1.1.1", "10.1.1.2"]), "Allow New Sources")])
+ self.assertTrue(find_mroute(self, "239.1.1.1", "0.0.0.0", 32))
+
+ # send join on pg2. join should be proxied by pg0.
+ # the group should contain only 10.1.1.3 as
+ # 10.1.1.1 was already reported
+ p_j = self._create_igmpv3_pck(self.pg2, "Allow New Sources",
+ "239.1.1.1", ["10.1.1.1", "10.1.1.3"])
+ self.send(self.pg2, p_j)
+
+ capture = self.pg0.get_capture(1, timeout=1)
+ self.verify_report(capture[0], [IgmpRecord(IgmpSG("239.1.1.1",
+ ["10.1.1.3"]), "Allow New Sources")])
+ self.assertTrue(find_mroute(self, "239.1.1.1", "0.0.0.0", 32))
+
+        # send leave on pg2. leave for 10.1.1.3 should be proxied
+        # as pg2 was the only interface interested in 10.1.1.3
+ p_l = self._create_igmpv3_pck(self.pg2, "Block Old Sources",
+ "239.1.1.1", ["10.1.1.3"])
+ self.send(self.pg2, p_l)
+
+ capture = self.pg0.get_capture(1, timeout=2)
+ self.verify_report(capture[0], [IgmpRecord(IgmpSG("239.1.1.1",
+ ["10.1.1.3"]), "Block Old Sources")])
+ self.assertTrue(find_mroute(self, "239.1.1.1", "0.0.0.0", 32))
+
+        # disable IGMP on pg1 (also removes the interface from the proxy
+        # device); a leave for 10.1.1.2 is proxied, pg2 still wants 10.1.1.1
+ self.pg_enable_capture(self.pg_interfaces)
+ self.vapi.igmp_enable_disable(self.pg1.sw_if_index, 0,
+ IGMP_MODE.ROUTER)
+
+ capture = self.pg0.get_capture(1, timeout=1)
+ self.verify_report(capture[0], [IgmpRecord(IgmpSG("239.1.1.1",
+ ["10.1.1.2"]), "Block Old Sources")])
+ self.assertTrue(find_mroute(self, "239.1.1.1", "0.0.0.0", 32))
+
+ # disable IGMP on pg0 and pg1.
+ # disabling IGMP on pg0 (proxy device upstream interface)
+ # removes this proxy device
+ self.vapi.igmp_enable_disable(self.pg0.sw_if_index, 0, IGMP_MODE.HOST)
+ self.vapi.igmp_enable_disable(self.pg2.sw_if_index, 0,
+ IGMP_MODE.ROUTER)
+ self.assertFalse(find_mroute(self, "239.1.1.1", "0.0.0.0", 32))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ikev2.py b/test/test_ikev2.py
new file mode 100644
index 00000000000..558e8a02f87
--- /dev/null
+++ b/test/test_ikev2.py
@@ -0,0 +1,2059 @@
+import os
+import time
+from socket import inet_pton
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, hmac
+from cryptography.hazmat.primitives.asymmetric import dh, padding
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+from cryptography.hazmat.primitives.ciphers import (
+ Cipher,
+ algorithms,
+ modes,
+)
+from ipaddress import IPv4Address, IPv6Address, ip_address
+import unittest
+from scapy.layers.ipsec import ESP
+from scapy.layers.inet import IP, UDP, Ether
+from scapy.layers.inet6 import IPv6
+from scapy.packet import raw, Raw
+from scapy.utils import long_converter
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner
+from vpp_ikev2 import Profile, IDType, AuthMethod
+from vpp_papi import VppEnum
+
+try:
+ text_type = unicode
+except NameError:
+ text_type = str
+
+KEY_PAD = b"Key Pad for IKEv2"
+SALT_SIZE = 4
+GCM_ICV_SIZE = 16
+GCM_IV_SIZE = 8
+
+
+# defined in rfc3526
+# tuple structure is (p, g, key_len in bytes)
+DH = {
+ '2048MODPgr': (long_converter("""
+ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
+ 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
+ EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
+ E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
+ EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
+ C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
+ 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
+ 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
+ E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
+ DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
+ 15728E5A 8AACAA68 FFFFFFFF FFFFFFFF"""), 2, 256),
+
+ '3072MODPgr': (long_converter("""
+ FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
+ 29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
+ EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
+ E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
+ EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
+ C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
+ 83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
+ 670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
+ E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
+ DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
+ 15728E5A 8AAAC42D AD33170D 04507A33 A85521AB DF1CBA64
+ ECFB8504 58DBEF0A 8AEA7157 5D060C7D B3970F85 A6E1E4C7
+ ABF5AE8C DB0933D7 1E8C94E0 4A25619D CEE3D226 1AD2EE6B
+ F12FFA06 D98A0864 D8760273 3EC86A64 521F2B18 177B200C
+ BBE11757 7A615D6C 770988C0 BAD946E2 08E24FA0 74E5AB31
+ 43DB5BFC E0FD108E 4B82D120 A93AD2CA FFFFFFFF FFFFFFFF"""), 2, 384)
+}
+
+
+class CryptoAlgo(object):
+ def __init__(self, name, cipher, mode):
+ self.name = name
+ self.cipher = cipher
+ self.mode = mode
+ if self.cipher is not None:
+ self.bs = self.cipher.block_size // 8
+
+ if self.name == 'AES-GCM-16ICV':
+ self.iv_len = GCM_IV_SIZE
+ else:
+ self.iv_len = self.bs
+
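+    # Note on the AEAD path below: the last four key octets are the GCM
+    # salt; the salt plus the eight-octet explicit IV form the twelve-octet
+    # nonce (RFC 5282), and only the IV is sent on the wire ahead of the
+    # ciphertext and ICV.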
+ def encrypt(self, data, key, aad=None):
+ iv = os.urandom(self.iv_len)
+ if aad is None:
+ encryptor = Cipher(self.cipher(key), self.mode(iv),
+ default_backend()).encryptor()
+ return iv + encryptor.update(data) + encryptor.finalize()
+ else:
+ salt = key[-SALT_SIZE:]
+ nonce = salt + iv
+ encryptor = Cipher(self.cipher(key[:-SALT_SIZE]), self.mode(nonce),
+ default_backend()).encryptor()
+ encryptor.authenticate_additional_data(aad)
+ data = encryptor.update(data) + encryptor.finalize()
+ data += encryptor.tag[:GCM_ICV_SIZE]
+ return iv + data
+
+ def decrypt(self, data, key, aad=None, icv=None):
+ if aad is None:
+ iv = data[:self.iv_len]
+ ct = data[self.iv_len:]
+ decryptor = Cipher(algorithms.AES(key),
+ self.mode(iv),
+ default_backend()).decryptor()
+ return decryptor.update(ct) + decryptor.finalize()
+ else:
+ salt = key[-SALT_SIZE:]
+ nonce = salt + data[:GCM_IV_SIZE]
+ ct = data[GCM_IV_SIZE:]
+ key = key[:-SALT_SIZE]
+ decryptor = Cipher(algorithms.AES(key),
+ self.mode(nonce, icv, len(icv)),
+ default_backend()).decryptor()
+ decryptor.authenticate_additional_data(aad)
+ return decryptor.update(ct) + decryptor.finalize()
+
+ def pad(self, data):
+ pad_len = (len(data) // self.bs + 1) * self.bs - len(data)
+ data = data + b'\x00' * (pad_len - 1)
+ return data + bytes([pad_len - 1])
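+    # RFC 7296 section 3.14 padding: extend the plaintext to the cipher
+    # block size and terminate it with a Pad Length octet equal to the
+    # number of padding bytes that precede it.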
+
+
+class AuthAlgo(object):
+ def __init__(self, name, mac, mod, key_len, trunc_len=None):
+ self.name = name
+ self.mac = mac
+ self.mod = mod
+ self.key_len = key_len
+ self.trunc_len = trunc_len or key_len
+
+
+CRYPTO_ALGOS = {
+ 'NULL': CryptoAlgo('NULL', cipher=None, mode=None),
+ 'AES-CBC': CryptoAlgo('AES-CBC', cipher=algorithms.AES, mode=modes.CBC),
+ 'AES-GCM-16ICV': CryptoAlgo('AES-GCM-16ICV', cipher=algorithms.AES,
+ mode=modes.GCM),
+}
+
+AUTH_ALGOS = {
+ 'NULL': AuthAlgo('NULL', mac=None, mod=None, key_len=0, trunc_len=0),
+ 'HMAC-SHA1-96': AuthAlgo('HMAC-SHA1-96', hmac.HMAC, hashes.SHA1, 20, 12),
+ 'SHA2-256-128': AuthAlgo('SHA2-256-128', hmac.HMAC, hashes.SHA256, 32, 16),
+ 'SHA2-384-192': AuthAlgo('SHA2-384-192', hmac.HMAC, hashes.SHA256, 48, 24),
+ 'SHA2-512-256': AuthAlgo('SHA2-512-256', hmac.HMAC, hashes.SHA256, 64, 32),
+}
+
+PRF_ALGOS = {
+ 'NULL': AuthAlgo('NULL', mac=None, mod=None, key_len=0, trunc_len=0),
+ 'PRF_HMAC_SHA2_256': AuthAlgo('PRF_HMAC_SHA2_256', hmac.HMAC,
+ hashes.SHA256, 32),
+}
+
+CRYPTO_IDS = {
+ 12: 'AES-CBC',
+ 20: 'AES-GCM-16ICV',
+}
+
+INTEG_IDS = {
+ 2: 'HMAC-SHA1-96',
+ 12: 'SHA2-256-128',
+ 13: 'SHA2-384-192',
+ 14: 'SHA2-512-256',
+}
+
+
+class IKEv2ChildSA(object):
+ def __init__(self, local_ts, remote_ts, is_initiator):
+ spi = os.urandom(4)
+ if is_initiator:
+ self.ispi = spi
+ self.rspi = None
+ else:
+ self.rspi = spi
+ self.ispi = None
+ self.local_ts = local_ts
+ self.remote_ts = remote_ts
+
+
+class IKEv2SA(object):
+ def __init__(self, test, is_initiator=True, i_id=None, r_id=None,
+ spi=b'\x01\x02\x03\x04\x05\x06\x07\x08', id_type='fqdn',
+ nonce=None, auth_data=None, local_ts=None, remote_ts=None,
+ auth_method='shared-key', priv_key=None, i_natt=False,
+ r_natt=False, udp_encap=False):
+ self.udp_encap = udp_encap
+ self.i_natt = i_natt
+ self.r_natt = r_natt
+ if i_natt or r_natt:
+ self.sport = 4500
+ self.dport = 4500
+ else:
+ self.sport = 500
+ self.dport = 500
+ self.msg_id = 0
+ self.dh_params = None
+ self.test = test
+ self.priv_key = priv_key
+ self.is_initiator = is_initiator
+ nonce = nonce or os.urandom(32)
+ self.auth_data = auth_data
+ self.i_id = i_id
+ self.r_id = r_id
+ if isinstance(id_type, str):
+ self.id_type = IDType.value(id_type)
+ else:
+ self.id_type = id_type
+ self.auth_method = auth_method
+ if self.is_initiator:
+ self.rspi = 8 * b'\x00'
+ self.ispi = spi
+ self.i_nonce = nonce
+ else:
+ self.rspi = spi
+ self.ispi = 8 * b'\x00'
+ self.r_nonce = nonce
+ self.child_sas = [IKEv2ChildSA(local_ts, remote_ts,
+ self.is_initiator)]
+
+ def new_msg_id(self):
+ self.msg_id += 1
+ return self.msg_id
+
+ @property
+ def my_dh_pub_key(self):
+ if self.is_initiator:
+ return self.i_dh_data
+ return self.r_dh_data
+
+ @property
+ def peer_dh_pub_key(self):
+ if self.is_initiator:
+ return self.r_dh_data
+ return self.i_dh_data
+
+ @property
+ def natt(self):
+ return self.i_natt or self.r_natt
+
+ def compute_secret(self):
+ priv = self.dh_private_key
+ peer = self.peer_dh_pub_key
+ p, g, l = self.ike_group
+ return pow(int.from_bytes(peer, 'big'),
+ int.from_bytes(priv, 'big'), p).to_bytes(l, 'big')
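+    # Classic finite-field DH: the shared secret is peer_pub ** priv mod p,
+    # serialized to the group's key length in bytes (256 for 2048MODPgr,
+    # 384 for 3072MODPgr).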
+
+ def generate_dh_data(self):
+ # generate DH keys
+ if self.ike_dh not in DH:
+ raise NotImplementedError('%s not in DH group' % self.ike_dh)
+
+ if self.dh_params is None:
+ dhg = DH[self.ike_dh]
+ pn = dh.DHParameterNumbers(dhg[0], dhg[1])
+ self.dh_params = pn.parameters(default_backend())
+
+ priv = self.dh_params.generate_private_key()
+ pub = priv.public_key()
+ x = priv.private_numbers().x
+ self.dh_private_key = x.to_bytes(priv.key_size // 8, 'big')
+ y = pub.public_numbers().y
+
+ if self.is_initiator:
+ self.i_dh_data = y.to_bytes(pub.key_size // 8, 'big')
+ else:
+ self.r_dh_data = y.to_bytes(pub.key_size // 8, 'big')
+
+ def complete_dh_data(self):
+ self.dh_shared_secret = self.compute_secret()
+
+ def calc_child_keys(self):
+ prf = self.ike_prf_alg.mod()
+ s = self.i_nonce + self.r_nonce
+ c = self.child_sas[0]
+
+ encr_key_len = self.esp_crypto_key_len
+ integ_key_len = self.esp_integ_alg.key_len
+ salt_len = 0 if integ_key_len else 4
+
+ l = (integ_key_len * 2 +
+ encr_key_len * 2 +
+ salt_len * 2)
+ keymat = self.calc_prfplus(prf, self.sk_d, s, l)
+
+ pos = 0
+ c.sk_ei = keymat[pos:pos+encr_key_len]
+ pos += encr_key_len
+
+ if integ_key_len:
+ c.sk_ai = keymat[pos:pos+integ_key_len]
+ pos += integ_key_len
+ else:
+ c.salt_ei = keymat[pos:pos+salt_len]
+ pos += salt_len
+
+ c.sk_er = keymat[pos:pos+encr_key_len]
+ pos += encr_key_len
+
+ if integ_key_len:
+ c.sk_ar = keymat[pos:pos+integ_key_len]
+ pos += integ_key_len
+ else:
+ c.salt_er = keymat[pos:pos+salt_len]
+ pos += salt_len
+
+ def calc_prfplus(self, prf, key, seed, length):
+ r = b''
+ t = None
+ x = 1
+ while len(r) < length and x < 255:
+ if t is not None:
+ s = t
+ else:
+ s = b''
+ s = s + seed + bytes([x])
+ t = self.calc_prf(prf, key, s)
+ r = r + t
+ x = x + 1
+
+ if x == 255:
+ return None
+ return r
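+    # calc_prfplus implements prf+ from RFC 7296 section 2.13:
+    #   prf+(K, S) = T1 | T2 | T3 | ...
+    #   T1 = prf(K, S | 0x01), T2 = prf(K, T1 | S | 0x02), ...
+    # iterated until enough key material has been produced.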
+
+ def calc_prf(self, prf, key, data):
+ h = self.ike_prf_alg.mac(key, prf, backend=default_backend())
+ h.update(data)
+ return h.finalize()
+
+ def calc_keys(self):
+ prf = self.ike_prf_alg.mod()
+ # SKEYSEED = prf(Ni | Nr, g^ir)
+ s = self.i_nonce + self.r_nonce
+ self.skeyseed = self.calc_prf(prf, s, self.dh_shared_secret)
+
+        # calculate S = Ni | Nr | SPIi | SPIr
+ s = s + self.ispi + self.rspi
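+        # Per RFC 7296 section 2.14 the keys are taken from
+        # prf+(SKEYSEED, Ni | Nr | SPIi | SPIr) in the order
+        # SK_d | SK_ai | SK_ar | SK_ei | SK_er | SK_pi | SK_pr,
+        # which is the order of the slices below; with AEAD ciphers a
+        # four-octet salt follows each SK_e in the key material.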
+
+ prf_key_trunc = self.ike_prf_alg.trunc_len
+ encr_key_len = self.ike_crypto_key_len
+ tr_prf_key_len = self.ike_prf_alg.key_len
+ integ_key_len = self.ike_integ_alg.key_len
+ if integ_key_len == 0:
+ salt_size = 4
+ else:
+ salt_size = 0
+
+ l = (prf_key_trunc +
+ integ_key_len * 2 +
+ encr_key_len * 2 +
+ tr_prf_key_len * 2 +
+ salt_size * 2)
+ keymat = self.calc_prfplus(prf, self.skeyseed, s, l)
+
+ pos = 0
+ self.sk_d = keymat[:pos+prf_key_trunc]
+ pos += prf_key_trunc
+
+ self.sk_ai = keymat[pos:pos+integ_key_len]
+ pos += integ_key_len
+ self.sk_ar = keymat[pos:pos+integ_key_len]
+ pos += integ_key_len
+
+ self.sk_ei = keymat[pos:pos+encr_key_len + salt_size]
+ pos += encr_key_len + salt_size
+ self.sk_er = keymat[pos:pos+encr_key_len + salt_size]
+ pos += encr_key_len + salt_size
+
+ self.sk_pi = keymat[pos:pos+tr_prf_key_len]
+ pos += tr_prf_key_len
+ self.sk_pr = keymat[pos:pos+tr_prf_key_len]
+
+ def generate_authmsg(self, prf, packet):
+ if self.is_initiator:
+ id = self.i_id
+ nonce = self.r_nonce
+ key = self.sk_pi
+ else:
+ id = self.r_id
+ nonce = self.i_nonce
+ key = self.sk_pr
+ data = bytes([self.id_type, 0, 0, 0]) + id
+ id_hash = self.calc_prf(prf, key, data)
+ return packet + nonce + id_hash
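+    # Per RFC 7296 section 2.15 the octets to be authenticated are the
+    # party's own IKE_SA_INIT message, the peer's nonce and prf(SK_p, ID'),
+    # where ID' is the ID payload without its generic payload header.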
+
+ def auth_init(self):
+ prf = self.ike_prf_alg.mod()
+ if self.is_initiator:
+ packet = self.init_req_packet
+ else:
+ packet = self.init_resp_packet
+ authmsg = self.generate_authmsg(prf, raw(packet))
+ if self.auth_method == 'shared-key':
+ psk = self.calc_prf(prf, self.auth_data, KEY_PAD)
+ self.auth_data = self.calc_prf(prf, psk, authmsg)
+ elif self.auth_method == 'rsa-sig':
+ self.auth_data = self.priv_key.sign(authmsg, padding.PKCS1v15(),
+ hashes.SHA1())
+ else:
+ raise TypeError('unknown auth method type!')
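+    # For shared-key auth, AUTH = prf(prf(secret, "Key Pad for IKEv2"),
+    # octets); for rsa-sig it is a PKCS#1 v1.5 RSA signature (SHA-1 here)
+    # over the same octets (RFC 7296 section 2.15).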
+
+ def encrypt(self, data, aad=None):
+ data = self.ike_crypto_alg.pad(data)
+ return self.ike_crypto_alg.encrypt(data, self.my_cryptokey, aad)
+
+ @property
+ def peer_authkey(self):
+ if self.is_initiator:
+ return self.sk_ar
+ return self.sk_ai
+
+ @property
+ def my_authkey(self):
+ if self.is_initiator:
+ return self.sk_ai
+ return self.sk_ar
+
+ @property
+ def my_cryptokey(self):
+ if self.is_initiator:
+ return self.sk_ei
+ return self.sk_er
+
+ @property
+ def peer_cryptokey(self):
+ if self.is_initiator:
+ return self.sk_er
+ return self.sk_ei
+
+ def concat(self, alg, key_len):
+ return alg + '-' + str(key_len * 8)
+
+ @property
+ def vpp_ike_cypto_alg(self):
+ return self.concat(self.ike_crypto, self.ike_crypto_key_len)
+
+ @property
+ def vpp_esp_cypto_alg(self):
+ return self.concat(self.esp_crypto, self.esp_crypto_key_len)
+
+ def verify_hmac(self, ikemsg):
+ integ_trunc = self.ike_integ_alg.trunc_len
+ exp_hmac = ikemsg[-integ_trunc:]
+ data = ikemsg[:-integ_trunc]
+ computed_hmac = self.compute_hmac(self.ike_integ_alg.mod(),
+ self.peer_authkey, data)
+ self.test.assertEqual(computed_hmac[:integ_trunc], exp_hmac)
+
+ def compute_hmac(self, integ, key, data):
+ h = self.ike_integ_alg.mac(key, integ, backend=default_backend())
+ h.update(data)
+ return h.finalize()
+
+ def decrypt(self, data, aad=None, icv=None):
+ return self.ike_crypto_alg.decrypt(data, self.peer_cryptokey, aad, icv)
+
+ def hmac_and_decrypt(self, ike):
+ ep = ike[ikev2.IKEv2_payload_Encrypted]
+ if self.ike_crypto == 'AES-GCM-16ICV':
+ aad_len = len(ikev2.IKEv2_payload_Encrypted()) + len(ikev2.IKEv2())
+ ct = ep.load[:-GCM_ICV_SIZE]
+ tag = ep.load[-GCM_ICV_SIZE:]
+ plain = self.decrypt(ct, raw(ike)[:aad_len], tag)
+ else:
+ self.verify_hmac(raw(ike))
+ integ_trunc = self.ike_integ_alg.trunc_len
+
+ # remove ICV and decrypt payload
+ ct = ep.load[:-integ_trunc]
+ plain = self.decrypt(ct)
+ # remove padding
+ pad_len = plain[-1]
+ return plain[:-pad_len - 1]
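+    # The SK (Encrypted) payload body is IV | ciphertext | ICV; with
+    # AES-GCM the IKE header plus the Encrypted payload header form the
+    # AAD, otherwise the ICV is a truncated HMAC computed over the whole
+    # message.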
+
+ def build_ts_addr(self, ts, version):
+ return {'starting_address_v' + version: ts['start_addr'],
+ 'ending_address_v' + version: ts['end_addr']}
+
+ def generate_ts(self, is_ip4):
+ c = self.child_sas[0]
+ ts_data = {'IP_protocol_ID': 0,
+ 'start_port': 0,
+ 'end_port': 0xffff}
+ if is_ip4:
+ ts_data.update(self.build_ts_addr(c.local_ts, '4'))
+ ts1 = ikev2.IPv4TrafficSelector(**ts_data)
+ ts_data.update(self.build_ts_addr(c.remote_ts, '4'))
+ ts2 = ikev2.IPv4TrafficSelector(**ts_data)
+ else:
+ ts_data.update(self.build_ts_addr(c.local_ts, '6'))
+ ts1 = ikev2.IPv6TrafficSelector(**ts_data)
+ ts_data.update(self.build_ts_addr(c.remote_ts, '6'))
+ ts2 = ikev2.IPv6TrafficSelector(**ts_data)
+
+ if self.is_initiator:
+ return ([ts1], [ts2])
+ return ([ts2], [ts1])
+
+ def set_ike_props(self, crypto, crypto_key_len, integ, prf, dh):
+ if crypto not in CRYPTO_ALGOS:
+ raise TypeError('unsupported encryption algo %r' % crypto)
+ self.ike_crypto = crypto
+ self.ike_crypto_alg = CRYPTO_ALGOS[crypto]
+ self.ike_crypto_key_len = crypto_key_len
+
+ if integ not in AUTH_ALGOS:
+ raise TypeError('unsupported auth algo %r' % integ)
+ self.ike_integ = None if integ == 'NULL' else integ
+ self.ike_integ_alg = AUTH_ALGOS[integ]
+
+ if prf not in PRF_ALGOS:
+ raise TypeError('unsupported prf algo %r' % prf)
+ self.ike_prf = prf
+ self.ike_prf_alg = PRF_ALGOS[prf]
+ self.ike_dh = dh
+ self.ike_group = DH[self.ike_dh]
+
+ def set_esp_props(self, crypto, crypto_key_len, integ):
+ self.esp_crypto_key_len = crypto_key_len
+ if crypto not in CRYPTO_ALGOS:
+ raise TypeError('unsupported encryption algo %r' % crypto)
+ self.esp_crypto = crypto
+ self.esp_crypto_alg = CRYPTO_ALGOS[crypto]
+
+ if integ not in AUTH_ALGOS:
+ raise TypeError('unsupported auth algo %r' % integ)
+ self.esp_integ = None if integ == 'NULL' else integ
+ self.esp_integ_alg = AUTH_ALGOS[integ]
+
+ def crypto_attr(self, key_len):
+ if self.ike_crypto in ['AES-CBC', 'AES-GCM-16ICV']:
+ return (0x800e << 16 | key_len << 3, 12)
+ else:
+ raise Exception('unsupported attribute type')
+
+ def ike_crypto_attr(self):
+ return self.crypto_attr(self.ike_crypto_key_len)
+
+ def esp_crypto_attr(self):
+ return self.crypto_attr(self.esp_crypto_key_len)
+
+ def compute_nat_sha1(self, ip, port, rspi=None):
+ if rspi is None:
+ rspi = self.rspi
+ data = self.ispi + rspi + ip + (port).to_bytes(2, 'big')
+ digest = hashes.Hash(hashes.SHA1(), backend=default_backend())
+ digest.update(data)
+ return digest.finalize()
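+    # NAT_DETECTION_*_IP notify payloads carry SHA-1(SPIi | SPIr | IP |
+    # port) per RFC 7296 section 2.23; a mismatch on either side tells the
+    # peers that a NAT sits between them.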
+
+
+class IkePeer(VppTestCase):
+ """ common class for initiator and responder """
+
+ @classmethod
+ def setUpClass(cls):
+ import scapy.contrib.ikev2 as _ikev2
+ globals()['ikev2'] = _ikev2
+ super(IkePeer, cls).setUpClass()
+ cls.create_pg_interfaces(range(2))
+ for i in cls.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(IkePeer, cls).tearDownClass()
+
+ def tearDown(self):
+ super(IkePeer, self).tearDown()
+ if self.del_sa_from_responder:
+ self.initiate_del_sa_from_responder()
+ else:
+ self.initiate_del_sa_from_initiator()
+ r = self.vapi.ikev2_sa_dump()
+ self.assertEqual(len(r), 0)
+ sas = self.vapi.ipsec_sa_dump()
+ self.assertEqual(len(sas), 0)
+ self.p.remove_vpp_config()
+ self.assertIsNone(self.p.query_vpp_config())
+
+ def setUp(self):
+ super(IkePeer, self).setUp()
+ self.config_tc()
+ self.p.add_vpp_config()
+ self.assertIsNotNone(self.p.query_vpp_config())
+ if self.sa.is_initiator:
+ self.sa.generate_dh_data()
+ self.vapi.cli('ikev2 set logging level 4')
+ self.vapi.cli('event-lo clear')
+
+ def assert_counter(self, count, name, version='ip4'):
+ node_name = '/err/ikev2-%s/' % version + name
+ self.assertEqual(count, self.statistics.get_err_counter(node_name))
+
+ def create_rekey_request(self):
+ sa, first_payload = self.generate_auth_payload(is_rekey=True)
+ header = ikev2.IKEv2(
+ init_SPI=self.sa.ispi,
+ resp_SPI=self.sa.rspi, id=self.sa.new_msg_id(),
+ flags='Initiator', exch_type='CREATE_CHILD_SA')
+
+ ike_msg = self.encrypt_ike_msg(header, sa, first_payload)
+ return self.create_packet(self.pg0, ike_msg, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+
+ def create_empty_request(self):
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ id=self.sa.new_msg_id(), flags='Initiator',
+ exch_type='INFORMATIONAL',
+ next_payload='Encrypted')
+
+ msg = self.encrypt_ike_msg(header, b'', None)
+ return self.create_packet(self.pg0, msg, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+
+ def create_packet(self, src_if, msg, sport=500, dport=500, natt=False,
+ use_ip6=False):
+ if use_ip6:
+ src_ip = src_if.remote_ip6
+ dst_ip = src_if.local_ip6
+ ip_layer = IPv6
+ else:
+ src_ip = src_if.remote_ip4
+ dst_ip = src_if.local_ip4
+ ip_layer = IP
+ res = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ ip_layer(src=src_ip, dst=dst_ip) /
+ UDP(sport=sport, dport=dport))
+ if natt:
+ # insert non ESP marker
+ res = res / Raw(b'\x00' * 4)
+ return res / msg
+
+ def verify_udp(self, udp):
+ self.assertEqual(udp.sport, self.sa.sport)
+ self.assertEqual(udp.dport, self.sa.dport)
+
+ def get_ike_header(self, packet):
+ try:
+ ih = packet[ikev2.IKEv2]
+ ih = self.verify_and_remove_non_esp_marker(ih)
+ except IndexError as e:
+            # this is a workaround for getting the IKEv2 layer, as both
+            # ikev2 and ipsec register for port 4500
+ esp = packet[ESP]
+ ih = self.verify_and_remove_non_esp_marker(esp)
+ self.assertEqual(ih.version, 0x20)
+ self.assertNotIn('Version', ih.flags)
+ return ih
+
+ def verify_and_remove_non_esp_marker(self, packet):
+ if self.sa.natt:
+ # if we are in nat traversal mode check for non esp marker
+ # and remove it
+ data = raw(packet)
+ self.assertEqual(data[:4], b'\x00' * 4)
+ return ikev2.IKEv2(data[4:])
+ else:
+ return packet
+
+ def encrypt_ike_msg(self, header, plain, first_payload):
+ if self.sa.ike_crypto == 'AES-GCM-16ICV':
+ data = self.sa.ike_crypto_alg.pad(raw(plain))
+ plen = len(data) + GCM_IV_SIZE + GCM_ICV_SIZE +\
+ len(ikev2.IKEv2_payload_Encrypted())
+ tlen = plen + len(ikev2.IKEv2())
+
+ # prepare aad data
+ sk_p = ikev2.IKEv2_payload_Encrypted(next_payload=first_payload,
+ length=plen)
+ header.length = tlen
+ res = header / sk_p
+ encr = self.sa.encrypt(raw(plain), raw(res))
+ sk_p = ikev2.IKEv2_payload_Encrypted(next_payload=first_payload,
+ length=plen, load=encr)
+ res = header / sk_p
+ else:
+ encr = self.sa.encrypt(raw(plain))
+ trunc_len = self.sa.ike_integ_alg.trunc_len
+ plen = len(encr) + len(ikev2.IKEv2_payload_Encrypted()) + trunc_len
+ tlen = plen + len(ikev2.IKEv2())
+
+ sk_p = ikev2.IKEv2_payload_Encrypted(next_payload=first_payload,
+ length=plen, load=encr)
+ header.length = tlen
+ res = header / sk_p
+
+ integ_data = raw(res)
+ hmac_data = self.sa.compute_hmac(self.sa.ike_integ_alg.mod(),
+ self.sa.my_authkey, integ_data)
+ res = res / Raw(hmac_data[:trunc_len])
+ assert(len(res) == tlen)
+ return res
+
+ def verify_udp_encap(self, ipsec_sa):
+ e = VppEnum.vl_api_ipsec_sad_flags_t
+ if self.sa.udp_encap or self.sa.natt:
+ self.assertIn(e.IPSEC_API_SAD_FLAG_UDP_ENCAP, ipsec_sa.flags)
+ else:
+ self.assertNotIn(e.IPSEC_API_SAD_FLAG_UDP_ENCAP, ipsec_sa.flags)
+
+ def verify_ipsec_sas(self, is_rekey=False):
+ sas = self.vapi.ipsec_sa_dump()
+ if is_rekey:
+            # after a rekey there is a short period of time in which the
+            # old inbound SA is still present
+ sa_count = 3
+ else:
+ sa_count = 2
+ self.assertEqual(len(sas), sa_count)
+ if self.sa.is_initiator:
+ if is_rekey:
+ sa0 = sas[0].entry
+ sa1 = sas[2].entry
+ else:
+ sa0 = sas[0].entry
+ sa1 = sas[1].entry
+ else:
+ if is_rekey:
+ sa0 = sas[2].entry
+ sa1 = sas[0].entry
+ else:
+ sa1 = sas[0].entry
+ sa0 = sas[1].entry
+
+ c = self.sa.child_sas[0]
+
+ self.verify_udp_encap(sa0)
+ self.verify_udp_encap(sa1)
+ vpp_crypto_alg = self.vpp_enums[self.sa.vpp_esp_cypto_alg]
+ self.assertEqual(sa0.crypto_algorithm, vpp_crypto_alg)
+ self.assertEqual(sa1.crypto_algorithm, vpp_crypto_alg)
+
+ if self.sa.esp_integ is None:
+ vpp_integ_alg = 0
+ else:
+ vpp_integ_alg = self.vpp_enums[self.sa.esp_integ]
+ self.assertEqual(sa0.integrity_algorithm, vpp_integ_alg)
+ self.assertEqual(sa1.integrity_algorithm, vpp_integ_alg)
+
+ # verify crypto keys
+ self.assertEqual(sa0.crypto_key.length, len(c.sk_er))
+ self.assertEqual(sa1.crypto_key.length, len(c.sk_ei))
+ self.assertEqual(sa0.crypto_key.data[:len(c.sk_er)], c.sk_er)
+ self.assertEqual(sa1.crypto_key.data[:len(c.sk_ei)], c.sk_ei)
+
+ # verify integ keys
+ if vpp_integ_alg:
+ self.assertEqual(sa0.integrity_key.length, len(c.sk_ar))
+ self.assertEqual(sa1.integrity_key.length, len(c.sk_ai))
+ self.assertEqual(sa0.integrity_key.data[:len(c.sk_ar)], c.sk_ar)
+ self.assertEqual(sa1.integrity_key.data[:len(c.sk_ai)], c.sk_ai)
+ else:
+ self.assertEqual(sa0.salt.to_bytes(4, 'little'), c.salt_er)
+ self.assertEqual(sa1.salt.to_bytes(4, 'little'), c.salt_ei)
+
+ def verify_keymat(self, api_keys, keys, name):
+ km = getattr(keys, name)
+ api_km = getattr(api_keys, name)
+ api_km_len = getattr(api_keys, name + '_len')
+ self.assertEqual(len(km), api_km_len)
+ self.assertEqual(km, api_km[:api_km_len])
+
+ def verify_id(self, api_id, exp_id):
+ self.assertEqual(api_id.type, IDType.value(exp_id.type))
+ self.assertEqual(api_id.data_len, exp_id.data_len)
+ self.assertEqual(bytes(api_id.data, 'ascii'), exp_id.type)
+
+ def verify_ike_sas(self):
+ r = self.vapi.ikev2_sa_dump()
+ self.assertEqual(len(r), 1)
+ sa = r[0].sa
+ self.assertEqual(self.sa.ispi, (sa.ispi).to_bytes(8, 'big'))
+ self.assertEqual(self.sa.rspi, (sa.rspi).to_bytes(8, 'big'))
+ if self.ip6:
+ if self.sa.is_initiator:
+ self.assertEqual(sa.iaddr, IPv6Address(self.pg0.remote_ip6))
+ self.assertEqual(sa.raddr, IPv6Address(self.pg0.local_ip6))
+ else:
+ self.assertEqual(sa.iaddr, IPv6Address(self.pg0.local_ip6))
+ self.assertEqual(sa.raddr, IPv6Address(self.pg0.remote_ip6))
+ else:
+ if self.sa.is_initiator:
+ self.assertEqual(sa.iaddr, IPv4Address(self.pg0.remote_ip4))
+ self.assertEqual(sa.raddr, IPv4Address(self.pg0.local_ip4))
+ else:
+ self.assertEqual(sa.iaddr, IPv4Address(self.pg0.local_ip4))
+ self.assertEqual(sa.raddr, IPv4Address(self.pg0.remote_ip4))
+ self.verify_keymat(sa.keys, self.sa, 'sk_d')
+ self.verify_keymat(sa.keys, self.sa, 'sk_ai')
+ self.verify_keymat(sa.keys, self.sa, 'sk_ar')
+ self.verify_keymat(sa.keys, self.sa, 'sk_ei')
+ self.verify_keymat(sa.keys, self.sa, 'sk_er')
+ self.verify_keymat(sa.keys, self.sa, 'sk_pi')
+ self.verify_keymat(sa.keys, self.sa, 'sk_pr')
+
+ self.assertEqual(sa.i_id.type, self.sa.id_type)
+ self.assertEqual(sa.r_id.type, self.sa.id_type)
+ self.assertEqual(sa.i_id.data_len, len(self.sa.i_id))
+ self.assertEqual(sa.r_id.data_len, len(self.sa.r_id))
+ self.assertEqual(bytes(sa.i_id.data, 'ascii'), self.sa.i_id)
+ self.assertEqual(bytes(sa.r_id.data, 'ascii'), self.sa.r_id)
+
+ r = self.vapi.ikev2_child_sa_dump(sa_index=sa.sa_index)
+ self.assertEqual(len(r), 1)
+ csa = r[0].child_sa
+ self.assertEqual(csa.sa_index, sa.sa_index)
+ c = self.sa.child_sas[0]
+ if hasattr(c, 'sk_ai'):
+ self.verify_keymat(csa.keys, c, 'sk_ai')
+ self.verify_keymat(csa.keys, c, 'sk_ar')
+ self.verify_keymat(csa.keys, c, 'sk_ei')
+ self.verify_keymat(csa.keys, c, 'sk_er')
+ self.assertEqual(csa.i_spi.to_bytes(4, 'big'), c.ispi)
+ self.assertEqual(csa.r_spi.to_bytes(4, 'big'), c.rspi)
+
+ tsi, tsr = self.sa.generate_ts(self.p.ts_is_ip4)
+ tsi = tsi[0]
+ tsr = tsr[0]
+ r = self.vapi.ikev2_traffic_selector_dump(
+ is_initiator=True, sa_index=sa.sa_index,
+ child_sa_index=csa.child_sa_index)
+ self.assertEqual(len(r), 1)
+ ts = r[0].ts
+ self.verify_ts(r[0].ts, tsi[0], True)
+
+ r = self.vapi.ikev2_traffic_selector_dump(
+ is_initiator=False, sa_index=sa.sa_index,
+ child_sa_index=csa.child_sa_index)
+ self.assertEqual(len(r), 1)
+ self.verify_ts(r[0].ts, tsr[0], False)
+
+ n = self.vapi.ikev2_nonce_get(is_initiator=True,
+ sa_index=sa.sa_index)
+ self.verify_nonce(n, self.sa.i_nonce)
+ n = self.vapi.ikev2_nonce_get(is_initiator=False,
+ sa_index=sa.sa_index)
+ self.verify_nonce(n, self.sa.r_nonce)
+
+ def verify_nonce(self, api_nonce, nonce):
+ self.assertEqual(api_nonce.data_len, len(nonce))
+ self.assertEqual(api_nonce.nonce, nonce)
+
+ def verify_ts(self, api_ts, ts, is_initiator):
+ if is_initiator:
+ self.assertTrue(api_ts.is_local)
+ else:
+ self.assertFalse(api_ts.is_local)
+
+ if self.p.ts_is_ip4:
+ self.assertEqual(api_ts.start_addr,
+ IPv4Address(ts.starting_address_v4))
+ self.assertEqual(api_ts.end_addr,
+ IPv4Address(ts.ending_address_v4))
+ else:
+ self.assertEqual(api_ts.start_addr,
+ IPv6Address(ts.starting_address_v6))
+ self.assertEqual(api_ts.end_addr,
+ IPv6Address(ts.ending_address_v6))
+ self.assertEqual(api_ts.start_port, ts.start_port)
+ self.assertEqual(api_ts.end_port, ts.end_port)
+ self.assertEqual(api_ts.protocol_id, ts.IP_protocol_ID)
+
+
+class TemplateInitiator(IkePeer):
+ """ initiator test template """
+
+ def initiate_del_sa_from_initiator(self):
+ ispi = int.from_bytes(self.sa.ispi, 'little')
+ self.pg0.enable_capture()
+ self.pg_start()
+ self.vapi.ikev2_initiate_del_ike_sa(ispi=ispi)
+ capture = self.pg0.get_capture(1)
+ ih = self.get_ike_header(capture[0])
+ self.assertNotIn('Response', ih.flags)
+ self.assertIn('Initiator', ih.flags)
+ self.assertEqual(ih.init_SPI, self.sa.ispi)
+ self.assertEqual(ih.resp_SPI, self.sa.rspi)
+ plain = self.sa.hmac_and_decrypt(ih)
+ d = ikev2.IKEv2_payload_Delete(plain)
+ self.assertEqual(d.proto, 1) # proto=IKEv2
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ flags='Response', exch_type='INFORMATIONAL',
+ id=ih.id, next_payload='Encrypted')
+ resp = self.encrypt_ike_msg(header, b'', None)
+ self.send_and_assert_no_replies(self.pg0, resp)
+
+ def verify_del_sa(self, packet):
+ ih = self.get_ike_header(packet)
+ self.assertEqual(ih.id, self.sa.msg_id)
+ self.assertEqual(ih.exch_type, 37) # exchange informational
+ self.assertIn('Response', ih.flags)
+ self.assertIn('Initiator', ih.flags)
+ plain = self.sa.hmac_and_decrypt(ih)
+ self.assertEqual(plain, b'')
+
+ def initiate_del_sa_from_responder(self):
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ exch_type='INFORMATIONAL',
+ id=self.sa.new_msg_id())
+ del_sa = ikev2.IKEv2_payload_Delete(proto='IKEv2')
+ ike_msg = self.encrypt_ike_msg(header, del_sa, 'Delete')
+ packet = self.create_packet(self.pg0, ike_msg,
+ self.sa.sport, self.sa.dport,
+ self.sa.natt, self.ip6)
+ self.pg0.add_stream(packet)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ self.verify_del_sa(capture[0])
+
+ @staticmethod
+ def find_notify_payload(packet, notify_type):
+ n = packet[ikev2.IKEv2_payload_Notify]
+ while n is not None:
+ if n.type == notify_type:
+ return n
+ n = n.payload
+ return None
+
+ def verify_nat_detection(self, packet):
+ if self.ip6:
+ iph = packet[IPv6]
+ else:
+ iph = packet[IP]
+ udp = packet[UDP]
+
+ # NAT_DETECTION_SOURCE_IP
+ s = self.find_notify_payload(packet, 16388)
+ self.assertIsNotNone(s)
+ src_sha = self.sa.compute_nat_sha1(
+ inet_pton(socket.AF_INET, iph.src), udp.sport, b'\x00' * 8)
+ self.assertEqual(s.load, src_sha)
+
+ # NAT_DETECTION_DESTINATION_IP
+ s = self.find_notify_payload(packet, 16389)
+ self.assertIsNotNone(s)
+ dst_sha = self.sa.compute_nat_sha1(
+ inet_pton(socket.AF_INET, iph.dst), udp.dport, b'\x00' * 8)
+ self.assertEqual(s.load, dst_sha)
+
+ def verify_sa_init_request(self, packet):
+ udp = packet[UDP]
+ self.sa.dport = udp.sport
+ ih = packet[ikev2.IKEv2]
+ self.assertNotEqual(ih.init_SPI, 8 * b'\x00')
+ self.assertEqual(ih.exch_type, 34) # SA_INIT
+ self.sa.ispi = ih.init_SPI
+ self.assertEqual(ih.resp_SPI, 8 * b'\x00')
+ self.assertIn('Initiator', ih.flags)
+ self.assertNotIn('Response', ih.flags)
+ self.sa.i_nonce = ih[ikev2.IKEv2_payload_Nonce].load
+ self.sa.i_dh_data = ih[ikev2.IKEv2_payload_KE].load
+
+ prop = packet[ikev2.IKEv2_payload_Proposal]
+ self.assertEqual(prop.proto, 1) # proto = ikev2
+ self.assertEqual(prop.proposal, 1)
+ self.assertEqual(prop.trans[0].transform_type, 1) # encryption
+ self.assertEqual(prop.trans[0].transform_id,
+ self.p.ike_transforms['crypto_alg'])
+ self.assertEqual(prop.trans[1].transform_type, 2) # prf
+ self.assertEqual(prop.trans[1].transform_id, 5) # "hmac-sha2-256"
+ self.assertEqual(prop.trans[2].transform_type, 4) # dh
+ self.assertEqual(prop.trans[2].transform_id,
+ self.p.ike_transforms['dh_group'])
+
+ self.verify_nat_detection(packet)
+ self.sa.set_ike_props(
+ crypto='AES-GCM-16ICV', crypto_key_len=32,
+ integ='NULL', prf='PRF_HMAC_SHA2_256', dh='3072MODPgr')
+ self.sa.set_esp_props(crypto='AES-CBC', crypto_key_len=32,
+ integ='SHA2-256-128')
+ self.sa.generate_dh_data()
+ self.sa.complete_dh_data()
+ self.sa.calc_keys()
+
+ def update_esp_transforms(self, trans, sa):
+ while trans:
+            if trans.transform_type == 1:  # encryption
+ sa.esp_crypto = CRYPTO_IDS[trans.transform_id]
+ elif trans.transform_type == 3: # integrity
+ sa.esp_integ = INTEG_IDS[trans.transform_id]
+ trans = trans.payload
+
+ def verify_sa_auth_req(self, packet):
+ udp = packet[UDP]
+ self.sa.dport = udp.sport
+ ih = self.get_ike_header(packet)
+ self.assertEqual(ih.resp_SPI, self.sa.rspi)
+ self.assertEqual(ih.init_SPI, self.sa.ispi)
+ self.assertEqual(ih.exch_type, 35) # IKE_AUTH
+ self.assertIn('Initiator', ih.flags)
+ self.assertNotIn('Response', ih.flags)
+
+ udp = packet[UDP]
+ self.verify_udp(udp)
+ self.assertEqual(ih.id, self.sa.msg_id + 1)
+ self.sa.msg_id += 1
+ plain = self.sa.hmac_and_decrypt(ih)
+ idi = ikev2.IKEv2_payload_IDi(plain)
+ idr = ikev2.IKEv2_payload_IDr(idi.payload)
+ self.assertEqual(idi.load, self.sa.i_id)
+ self.assertEqual(idr.load, self.sa.r_id)
+ prop = idi[ikev2.IKEv2_payload_Proposal]
+ c = self.sa.child_sas[0]
+ c.ispi = prop.SPI
+ self.update_esp_transforms(
+ prop[ikev2.IKEv2_payload_Transform], self.sa)
+
+ def send_init_response(self):
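+        # craft the IKE_SA_INIT response on behalf of the emulated peer
+        # and expect VPP (the initiator) to reply with an IKE_AUTH request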
+ tr_attr = self.sa.ike_crypto_attr()
+ trans = (ikev2.IKEv2_payload_Transform(transform_type='Encryption',
+ transform_id=self.sa.ike_crypto, length=tr_attr[1],
+ key_length=tr_attr[0]) /
+ ikev2.IKEv2_payload_Transform(transform_type='Integrity',
+ transform_id=self.sa.ike_integ) /
+ ikev2.IKEv2_payload_Transform(transform_type='PRF',
+ transform_id=self.sa.ike_prf_alg.name) /
+ ikev2.IKEv2_payload_Transform(transform_type='GroupDesc',
+ transform_id=self.sa.ike_dh))
+ props = (ikev2.IKEv2_payload_Proposal(proposal=1, proto='IKEv2',
+ trans_nb=4, trans=trans))
+
+ src_address = inet_pton(socket.AF_INET, self.pg0.remote_ip4)
+ if self.sa.natt:
+ dst_address = b'\x0a\x0a\x0a\x0a'
+ else:
+ dst_address = inet_pton(socket.AF_INET, self.pg0.local_ip4)
+ src_nat = self.sa.compute_nat_sha1(src_address, self.sa.sport)
+ dst_nat = self.sa.compute_nat_sha1(dst_address, self.sa.dport)
+
+ self.sa.init_resp_packet = (
+ ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ exch_type='IKE_SA_INIT', flags='Response') /
+ ikev2.IKEv2_payload_SA(next_payload='KE', prop=props) /
+ ikev2.IKEv2_payload_KE(next_payload='Nonce',
+ group=self.sa.ike_dh,
+ load=self.sa.my_dh_pub_key) /
+ ikev2.IKEv2_payload_Nonce(load=self.sa.r_nonce,
+ next_payload='Notify') /
+ ikev2.IKEv2_payload_Notify(
+ type='NAT_DETECTION_SOURCE_IP', load=src_nat,
+ next_payload='Notify') / ikev2.IKEv2_payload_Notify(
+ type='NAT_DETECTION_DESTINATION_IP', load=dst_nat))
+
+ ike_msg = self.create_packet(self.pg0, self.sa.init_resp_packet,
+ self.sa.sport, self.sa.dport,
+ False, self.ip6)
+ self.pg_send(self.pg0, ike_msg)
+ capture = self.pg0.get_capture(1)
+ self.verify_sa_auth_req(capture[0])
+
+ def initiate_sa_init(self):
+ self.pg0.enable_capture()
+ self.pg_start()
+ self.vapi.ikev2_initiate_sa_init(name=self.p.profile_name)
+
+ capture = self.pg0.get_capture(1)
+ self.verify_sa_init_request(capture[0])
+ self.send_init_response()
+
+ def send_auth_response(self):
+ tr_attr = self.sa.esp_crypto_attr()
+ trans = (ikev2.IKEv2_payload_Transform(transform_type='Encryption',
+ transform_id=self.sa.esp_crypto, length=tr_attr[1],
+ key_length=tr_attr[0]) /
+ ikev2.IKEv2_payload_Transform(transform_type='Integrity',
+ transform_id=self.sa.esp_integ) /
+ ikev2.IKEv2_payload_Transform(
+ transform_type='Extended Sequence Number',
+ transform_id='No ESN') /
+ ikev2.IKEv2_payload_Transform(
+ transform_type='Extended Sequence Number',
+ transform_id='ESN'))
+
+ c = self.sa.child_sas[0]
+ props = (ikev2.IKEv2_payload_Proposal(proposal=1, proto='ESP',
+ SPIsize=4, SPI=c.rspi, trans_nb=4, trans=trans))
+
+ tsi, tsr = self.sa.generate_ts(self.p.ts_is_ip4)
+ plain = (ikev2.IKEv2_payload_IDi(next_payload='IDr',
+ IDtype=self.sa.id_type, load=self.sa.i_id) /
+ ikev2.IKEv2_payload_IDr(next_payload='AUTH',
+ IDtype=self.sa.id_type, load=self.sa.r_id) /
+ ikev2.IKEv2_payload_AUTH(next_payload='SA',
+ auth_type=AuthMethod.value(self.sa.auth_method),
+ load=self.sa.auth_data) /
+ ikev2.IKEv2_payload_SA(next_payload='TSi', prop=props) /
+ ikev2.IKEv2_payload_TSi(next_payload='TSr',
+ number_of_TSs=len(tsi),
+ traffic_selector=tsi) /
+ ikev2.IKEv2_payload_TSr(next_payload='Notify',
+ number_of_TSs=len(tsr),
+ traffic_selector=tsr) /
+ ikev2.IKEv2_payload_Notify(type='INITIAL_CONTACT'))
+
+ header = ikev2.IKEv2(
+ init_SPI=self.sa.ispi,
+ resp_SPI=self.sa.rspi, id=self.sa.new_msg_id(),
+ flags='Response', exch_type='IKE_AUTH')
+
+ ike_msg = self.encrypt_ike_msg(header, plain, 'IDi')
+ packet = self.create_packet(self.pg0, ike_msg, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+ self.pg_send(self.pg0, packet)
+
+ def test_initiator(self):
+ self.initiate_sa_init()
+ self.sa.auth_init()
+ self.sa.calc_child_keys()
+ self.send_auth_response()
+ self.verify_ike_sas()
+
+
+class TemplateResponder(IkePeer):
+ """ responder test template """
+
+ def initiate_del_sa_from_responder(self):
+ self.pg0.enable_capture()
+ self.pg_start()
+ self.vapi.ikev2_initiate_del_ike_sa(
+ ispi=int.from_bytes(self.sa.ispi, 'little'))
+ capture = self.pg0.get_capture(1)
+ ih = self.get_ike_header(capture[0])
+ self.assertNotIn('Response', ih.flags)
+ self.assertNotIn('Initiator', ih.flags)
+ self.assertEqual(ih.exch_type, 37) # INFORMATIONAL
+ plain = self.sa.hmac_and_decrypt(ih)
+ d = ikev2.IKEv2_payload_Delete(plain)
+ self.assertEqual(d.proto, 1) # proto=IKEv2
+ self.assertEqual(ih.init_SPI, self.sa.ispi)
+ self.assertEqual(ih.resp_SPI, self.sa.rspi)
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ flags='Initiator+Response',
+ exch_type='INFORMATIONAL',
+ id=ih.id, next_payload='Encrypted')
+ resp = self.encrypt_ike_msg(header, b'', None)
+ self.send_and_assert_no_replies(self.pg0, resp)
+
+ def verify_del_sa(self, packet):
+ ih = self.get_ike_header(packet)
+ self.assertEqual(ih.id, self.sa.msg_id)
+ self.assertEqual(ih.exch_type, 37) # exchange informational
+ self.assertIn('Response', ih.flags)
+ self.assertNotIn('Initiator', ih.flags)
+ self.assertEqual(ih.next_payload, 46) # Encrypted
+ self.assertEqual(ih.init_SPI, self.sa.ispi)
+ self.assertEqual(ih.resp_SPI, self.sa.rspi)
+ plain = self.sa.hmac_and_decrypt(ih)
+ self.assertEqual(plain, b'')
+
+ def initiate_del_sa_from_initiator(self):
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ flags='Initiator', exch_type='INFORMATIONAL',
+ id=self.sa.new_msg_id())
+ del_sa = ikev2.IKEv2_payload_Delete(proto='IKEv2')
+ ike_msg = self.encrypt_ike_msg(header, del_sa, 'Delete')
+ packet = self.create_packet(self.pg0, ike_msg,
+ self.sa.sport, self.sa.dport,
+ self.sa.natt, self.ip6)
+ self.pg0.add_stream(packet)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ self.verify_del_sa(capture[0])
+
+ def send_sa_init_req(self):
+ tr_attr = self.sa.ike_crypto_attr()
+ trans = (ikev2.IKEv2_payload_Transform(transform_type='Encryption',
+ transform_id=self.sa.ike_crypto, length=tr_attr[1],
+ key_length=tr_attr[0]) /
+ ikev2.IKEv2_payload_Transform(transform_type='Integrity',
+ transform_id=self.sa.ike_integ) /
+ ikev2.IKEv2_payload_Transform(transform_type='PRF',
+ transform_id=self.sa.ike_prf_alg.name) /
+ ikev2.IKEv2_payload_Transform(transform_type='GroupDesc',
+ transform_id=self.sa.ike_dh))
+
+ props = (ikev2.IKEv2_payload_Proposal(proposal=1, proto='IKEv2',
+ trans_nb=4, trans=trans))
+
+ next_payload = None if self.ip6 else 'Notify'
+
+ self.sa.init_req_packet = (
+ ikev2.IKEv2(init_SPI=self.sa.ispi,
+ flags='Initiator', exch_type='IKE_SA_INIT') /
+ ikev2.IKEv2_payload_SA(next_payload='KE', prop=props) /
+ ikev2.IKEv2_payload_KE(next_payload='Nonce',
+ group=self.sa.ike_dh,
+ load=self.sa.my_dh_pub_key) /
+ ikev2.IKEv2_payload_Nonce(next_payload=next_payload,
+ load=self.sa.i_nonce))
+
+ if not self.ip6:
+ if self.sa.i_natt:
+ src_address = b'\x0a\x0a\x0a\x01'
+ else:
+ src_address = inet_pton(socket.AF_INET, self.pg0.remote_ip4)
+
+ if self.sa.r_natt:
+ dst_address = b'\x0a\x0a\x0a\x0a'
+ else:
+ dst_address = inet_pton(socket.AF_INET, self.pg0.local_ip4)
+
+ src_nat = self.sa.compute_nat_sha1(src_address, self.sa.sport)
+ dst_nat = self.sa.compute_nat_sha1(dst_address, self.sa.dport)
+ nat_src_detection = ikev2.IKEv2_payload_Notify(
+ type='NAT_DETECTION_SOURCE_IP', load=src_nat,
+ next_payload='Notify')
+ nat_dst_detection = ikev2.IKEv2_payload_Notify(
+ type='NAT_DETECTION_DESTINATION_IP', load=dst_nat)
+ self.sa.init_req_packet = (self.sa.init_req_packet /
+ nat_src_detection /
+ nat_dst_detection)
+
+ ike_msg = self.create_packet(self.pg0, self.sa.init_req_packet,
+ self.sa.sport, self.sa.dport,
+ self.sa.natt, self.ip6)
+ self.pg0.add_stream(ike_msg)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ self.verify_sa_init(capture[0])
+
+ def generate_auth_payload(self, last_payload=None, is_rekey=False):
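+        # initial IKE_AUTH requests start with IDi/IDr payloads, while
+        # CREATE_CHILD_SA rekey requests start with a Nonce and carry a
+        # REKEY_SA notify referencing the child SPI being rekeyed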
+ tr_attr = self.sa.esp_crypto_attr()
+ last_payload = last_payload or 'Notify'
+ trans = (ikev2.IKEv2_payload_Transform(transform_type='Encryption',
+ transform_id=self.sa.esp_crypto, length=tr_attr[1],
+ key_length=tr_attr[0]) /
+ ikev2.IKEv2_payload_Transform(transform_type='Integrity',
+ transform_id=self.sa.esp_integ) /
+ ikev2.IKEv2_payload_Transform(
+ transform_type='Extended Sequence Number',
+ transform_id='No ESN') /
+ ikev2.IKEv2_payload_Transform(
+ transform_type='Extended Sequence Number',
+ transform_id='ESN'))
+
+ c = self.sa.child_sas[0]
+ props = (ikev2.IKEv2_payload_Proposal(proposal=1, proto='ESP',
+ SPIsize=4, SPI=c.ispi, trans_nb=4, trans=trans))
+
+ tsi, tsr = self.sa.generate_ts(self.p.ts_is_ip4)
+ plain = (ikev2.IKEv2_payload_AUTH(next_payload='SA',
+ auth_type=AuthMethod.value(self.sa.auth_method),
+ load=self.sa.auth_data) /
+ ikev2.IKEv2_payload_SA(next_payload='TSi', prop=props) /
+ ikev2.IKEv2_payload_TSi(next_payload='TSr',
+ number_of_TSs=len(tsi), traffic_selector=tsi) /
+ ikev2.IKEv2_payload_TSr(next_payload=last_payload,
+ number_of_TSs=len(tsr), traffic_selector=tsr))
+
+ if is_rekey:
+ first_payload = 'Nonce'
+ plain = (ikev2.IKEv2_payload_Nonce(load=self.sa.i_nonce,
+ next_payload='SA') / plain /
+ ikev2.IKEv2_payload_Notify(type='REKEY_SA',
+ proto='ESP', SPI=c.ispi))
+ else:
+ first_payload = 'IDi'
+ ids = (ikev2.IKEv2_payload_IDi(next_payload='IDr',
+ IDtype=self.sa.id_type, load=self.sa.i_id) /
+ ikev2.IKEv2_payload_IDr(next_payload='AUTH',
+ IDtype=self.sa.id_type, load=self.sa.r_id))
+ plain = ids / plain
+ return plain, first_payload
+
+ def send_sa_auth(self):
+ plain, first_payload = self.generate_auth_payload(
+ last_payload='Notify')
+ plain = plain / ikev2.IKEv2_payload_Notify(type='INITIAL_CONTACT')
+ header = ikev2.IKEv2(
+ init_SPI=self.sa.ispi,
+ resp_SPI=self.sa.rspi, id=self.sa.new_msg_id(),
+ flags='Initiator', exch_type='IKE_AUTH')
+
+ ike_msg = self.encrypt_ike_msg(header, plain, first_payload)
+ packet = self.create_packet(self.pg0, ike_msg, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+ self.pg0.add_stream(packet)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ self.verify_sa_auth_resp(capture[0])
+
+ def verify_sa_init(self, packet):
+ ih = self.get_ike_header(packet)
+
+ self.assertEqual(ih.id, self.sa.msg_id)
+ self.assertEqual(ih.exch_type, 34)
+ self.assertIn('Response', ih.flags)
+ self.assertEqual(ih.init_SPI, self.sa.ispi)
+ self.assertNotEqual(ih.resp_SPI, 0)
+ self.sa.rspi = ih.resp_SPI
+ try:
+ sa = ih[ikev2.IKEv2_payload_SA]
+ self.sa.r_nonce = ih[ikev2.IKEv2_payload_Nonce].load
+ self.sa.r_dh_data = ih[ikev2.IKEv2_payload_KE].load
+ except IndexError as e:
+            self.logger.error(
+                "unexpected reply: SA/Nonce/KE payload not found!")
+ self.logger.error(ih.show())
+ raise
+ self.sa.complete_dh_data()
+ self.sa.calc_keys()
+ self.sa.auth_init()
+
+ def verify_sa_auth_resp(self, packet):
+ ike = self.get_ike_header(packet)
+ udp = packet[UDP]
+ self.verify_udp(udp)
+ self.assertEqual(ike.id, self.sa.msg_id)
+ plain = self.sa.hmac_and_decrypt(ike)
+ idr = ikev2.IKEv2_payload_IDr(plain)
+ prop = idr[ikev2.IKEv2_payload_Proposal]
+ self.assertEqual(prop.SPIsize, 4)
+ self.sa.child_sas[0].rspi = prop.SPI
+ self.sa.calc_child_keys()
+
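+    # suffix of the ikev2 input node whose error counters are checked;
+    # overridden by the NAT-T ('ip4-natt') and IPv6 ('ip6') variants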
+ IKE_NODE_SUFFIX = 'ip4'
+
+ def verify_counters(self):
+ self.assert_counter(2, 'processed', self.IKE_NODE_SUFFIX)
+ self.assert_counter(1, 'init_sa_req', self.IKE_NODE_SUFFIX)
+ self.assert_counter(1, 'ike_auth_req', self.IKE_NODE_SUFFIX)
+
+ r = self.vapi.ikev2_sa_dump()
+ s = r[0].sa.stats
+ self.assertEqual(1, s.n_sa_auth_req)
+ self.assertEqual(1, s.n_sa_init_req)
+
+ def test_responder(self):
+ self.send_sa_init_req()
+ self.send_sa_auth()
+ self.verify_ipsec_sas()
+ self.verify_ike_sas()
+ self.verify_counters()
+
+
+class Ikev2Params(object):
+ def config_params(self, params={}):
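+        # 'params' selects the test variant: NAT placement (i_natt/r_natt),
+        # address family, auth method, IKE/ESP transforms, traffic
+        # selectors and responder addressing; missing keys use the
+        # defaults below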
+ ec = VppEnum.vl_api_ipsec_crypto_alg_t
+ ei = VppEnum.vl_api_ipsec_integ_alg_t
+ self.vpp_enums = {
+ 'AES-CBC-128': ec.IPSEC_API_CRYPTO_ALG_AES_CBC_128,
+ 'AES-CBC-192': ec.IPSEC_API_CRYPTO_ALG_AES_CBC_192,
+ 'AES-CBC-256': ec.IPSEC_API_CRYPTO_ALG_AES_CBC_256,
+ 'AES-GCM-16ICV-128': ec.IPSEC_API_CRYPTO_ALG_AES_GCM_128,
+ 'AES-GCM-16ICV-192': ec.IPSEC_API_CRYPTO_ALG_AES_GCM_192,
+ 'AES-GCM-16ICV-256': ec.IPSEC_API_CRYPTO_ALG_AES_GCM_256,
+
+ 'HMAC-SHA1-96': ei.IPSEC_API_INTEG_ALG_SHA1_96,
+ 'SHA2-256-128': ei.IPSEC_API_INTEG_ALG_SHA_256_128,
+ 'SHA2-384-192': ei.IPSEC_API_INTEG_ALG_SHA_384_192,
+ 'SHA2-512-256': ei.IPSEC_API_INTEG_ALG_SHA_512_256}
+
+ dpd_disabled = True if 'dpd_disabled' not in params else\
+ params['dpd_disabled']
+ if dpd_disabled:
+ self.vapi.cli('ikev2 dpd disable')
+ self.del_sa_from_responder = False if 'del_sa_from_responder'\
+ not in params else params['del_sa_from_responder']
+ i_natt = False if 'i_natt' not in params else params['i_natt']
+ r_natt = False if 'r_natt' not in params else params['r_natt']
+ self.p = Profile(self, 'pr1')
+ self.ip6 = False if 'ip6' not in params else params['ip6']
+
+ if 'auth' in params and params['auth'] == 'rsa-sig':
+ auth_method = 'rsa-sig'
+ work_dir = os.getenv('BR') + '/../src/plugins/ikev2/test/certs/'
+ self.vapi.ikev2_set_local_key(
+ key_file=work_dir + params['server-key'])
+
+ client_file = work_dir + params['client-cert']
+ server_pem = open(work_dir + params['server-cert']).read()
+ client_priv = open(work_dir + params['client-key']).read()
+ client_priv = load_pem_private_key(str.encode(client_priv), None,
+ default_backend())
+ self.peer_cert = x509.load_pem_x509_certificate(
+ str.encode(server_pem),
+ default_backend())
+ self.p.add_auth(method='rsa-sig', data=str.encode(client_file))
+ auth_data = None
+ else:
+ auth_data = b'$3cr3tpa$$w0rd'
+ self.p.add_auth(method='shared-key', data=auth_data)
+ auth_method = 'shared-key'
+ client_priv = None
+
+ is_init = True if 'is_initiator' not in params else\
+ params['is_initiator']
+
+ idr = {'id_type': 'fqdn', 'data': b'vpp.home'}
+ idi = {'id_type': 'fqdn', 'data': b'roadwarrior.example.com'}
+ if is_init:
+ self.p.add_local_id(**idr)
+ self.p.add_remote_id(**idi)
+ else:
+ self.p.add_local_id(**idi)
+ self.p.add_remote_id(**idr)
+
+ loc_ts = {'start_addr': '10.10.10.0', 'end_addr': '10.10.10.255'} if\
+ 'loc_ts' not in params else params['loc_ts']
+ rem_ts = {'start_addr': '10.0.0.0', 'end_addr': '10.0.0.255'} if\
+ 'rem_ts' not in params else params['rem_ts']
+ self.p.add_local_ts(**loc_ts)
+ self.p.add_remote_ts(**rem_ts)
+ if 'responder' in params:
+ self.p.add_responder(params['responder'])
+ if 'ike_transforms' in params:
+ self.p.add_ike_transforms(params['ike_transforms'])
+ if 'esp_transforms' in params:
+ self.p.add_esp_transforms(params['esp_transforms'])
+
+ udp_encap = False if 'udp_encap' not in params else\
+ params['udp_encap']
+ if udp_encap:
+ self.p.set_udp_encap(True)
+
+ if 'responder_hostname' in params:
+ hn = params['responder_hostname']
+ self.p.add_responder_hostname(hn)
+
+ # configure static dns record
+ self.vapi.dns_name_server_add_del(
+ is_ip6=0, is_add=1,
+ server_address=IPv4Address(u'8.8.8.8').packed)
+ self.vapi.dns_enable_disable(enable=1)
+
+ cmd = "dns cache add {} {}".format(hn['hostname'],
+ self.pg0.remote_ip4)
+ self.vapi.cli(cmd)
+
+ self.sa = IKEv2SA(self, i_id=idi['data'], r_id=idr['data'],
+ is_initiator=is_init,
+ id_type=self.p.local_id['id_type'],
+ i_natt=i_natt, r_natt=r_natt,
+ priv_key=client_priv, auth_method=auth_method,
+ auth_data=auth_data, udp_encap=udp_encap,
+ local_ts=self.p.remote_ts, remote_ts=self.p.local_ts)
+ if is_init:
+ ike_crypto = ('AES-CBC', 32) if 'ike-crypto' not in params else\
+ params['ike-crypto']
+ ike_integ = 'HMAC-SHA1-96' if 'ike-integ' not in params else\
+ params['ike-integ']
+ ike_dh = '2048MODPgr' if 'ike-dh' not in params else\
+ params['ike-dh']
+
+ esp_crypto = ('AES-CBC', 32) if 'esp-crypto' not in params else\
+ params['esp-crypto']
+ esp_integ = 'HMAC-SHA1-96' if 'esp-integ' not in params else\
+ params['esp-integ']
+
+ self.sa.set_ike_props(
+ crypto=ike_crypto[0], crypto_key_len=ike_crypto[1],
+ integ=ike_integ, prf='PRF_HMAC_SHA2_256', dh=ike_dh)
+ self.sa.set_esp_props(
+ crypto=esp_crypto[0], crypto_key_len=esp_crypto[1],
+ integ=esp_integ)
+
+
+class TestApi(VppTestCase):
+ """ Test IKEV2 API """
+ @classmethod
+ def setUpClass(cls):
+ super(TestApi, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestApi, cls).tearDownClass()
+
+ def tearDown(self):
+ super(TestApi, self).tearDown()
+ self.p1.remove_vpp_config()
+ self.p2.remove_vpp_config()
+ r = self.vapi.ikev2_profile_dump()
+ self.assertEqual(len(r), 0)
+
+ def configure_profile(self, cfg):
+ p = Profile(self, cfg['name'])
+ p.add_local_id(id_type=cfg['loc_id'][0], data=cfg['loc_id'][1])
+ p.add_remote_id(id_type=cfg['rem_id'][0], data=cfg['rem_id'][1])
+ p.add_local_ts(**cfg['loc_ts'])
+ p.add_remote_ts(**cfg['rem_ts'])
+ p.add_responder(cfg['responder'])
+ p.add_ike_transforms(cfg['ike_ts'])
+ p.add_esp_transforms(cfg['esp_ts'])
+ p.add_auth(**cfg['auth'])
+ p.set_udp_encap(cfg['udp_encap'])
+ p.set_ipsec_over_udp_port(cfg['ipsec_over_udp_port'])
+ if 'lifetime_data' in cfg:
+ p.set_lifetime_data(cfg['lifetime_data'])
+ if 'tun_itf' in cfg:
+ p.set_tunnel_interface(cfg['tun_itf'])
+ if 'natt_disabled' in cfg and cfg['natt_disabled']:
+ p.disable_natt()
+ p.add_vpp_config()
+ return p
+
+ def test_profile_api(self):
+ """ test profile dump API """
+ loc_ts4 = {
+ 'proto': 8,
+ 'start_port': 1,
+ 'end_port': 19,
+ 'start_addr': '3.3.3.2',
+ 'end_addr': '3.3.3.3',
+ }
+ rem_ts4 = {
+ 'proto': 9,
+ 'start_port': 10,
+ 'end_port': 119,
+ 'start_addr': '4.5.76.80',
+ 'end_addr': '2.3.4.6',
+ }
+
+ loc_ts6 = {
+ 'proto': 8,
+ 'start_port': 1,
+ 'end_port': 19,
+ 'start_addr': 'ab::1',
+ 'end_addr': 'ab::4',
+ }
+ rem_ts6 = {
+ 'proto': 9,
+ 'start_port': 10,
+ 'end_port': 119,
+ 'start_addr': 'cd::12',
+ 'end_addr': 'cd::13',
+ }
+
+ conf = {
+ 'p1': {
+ 'name': 'p1',
+ 'natt_disabled': True,
+ 'loc_id': ('fqdn', b'vpp.home'),
+ 'rem_id': ('fqdn', b'roadwarrior.example.com'),
+ 'loc_ts': loc_ts4,
+ 'rem_ts': rem_ts4,
+ 'responder': {'sw_if_index': 0, 'addr': '5.6.7.8'},
+ 'ike_ts': {
+ 'crypto_alg': 20,
+ 'crypto_key_size': 32,
+ 'integ_alg': 1,
+ 'dh_group': 1},
+ 'esp_ts': {
+ 'crypto_alg': 13,
+ 'crypto_key_size': 24,
+ 'integ_alg': 2},
+ 'auth': {'method': 'shared-key', 'data': b'sharedkeydata'},
+ 'udp_encap': True,
+ 'ipsec_over_udp_port': 4501,
+ 'lifetime_data': {
+ 'lifetime': 123,
+ 'lifetime_maxdata': 20192,
+ 'lifetime_jitter': 9,
+ 'handover': 132},
+ },
+ 'p2': {
+ 'name': 'p2',
+ 'loc_id': ('ip4-addr', b'192.168.2.1'),
+ 'rem_id': ('ip6-addr', b'abcd::1'),
+ 'loc_ts': loc_ts6,
+ 'rem_ts': rem_ts6,
+ 'responder': {'sw_if_index': 4, 'addr': 'def::10'},
+ 'ike_ts': {
+ 'crypto_alg': 12,
+ 'crypto_key_size': 16,
+ 'integ_alg': 3,
+ 'dh_group': 3},
+ 'esp_ts': {
+ 'crypto_alg': 9,
+ 'crypto_key_size': 24,
+ 'integ_alg': 4},
+ 'auth': {'method': 'shared-key', 'data': b'sharedkeydata'},
+ 'udp_encap': False,
+ 'ipsec_over_udp_port': 4600,
+ 'tun_itf': 0}
+ }
+ self.p1 = self.configure_profile(conf['p1'])
+ self.p2 = self.configure_profile(conf['p2'])
+
+ r = self.vapi.ikev2_profile_dump()
+ self.assertEqual(len(r), 2)
+ self.verify_profile(r[0].profile, conf['p1'])
+ self.verify_profile(r[1].profile, conf['p2'])
+
+ def verify_id(self, api_id, cfg_id):
+ self.assertEqual(api_id.type, IDType.value(cfg_id[0]))
+ self.assertEqual(bytes(api_id.data, 'ascii'), cfg_id[1])
+
+ def verify_ts(self, api_ts, cfg_ts):
+ self.assertEqual(api_ts.protocol_id, cfg_ts['proto'])
+ self.assertEqual(api_ts.start_port, cfg_ts['start_port'])
+ self.assertEqual(api_ts.end_port, cfg_ts['end_port'])
+ self.assertEqual(api_ts.start_addr,
+ ip_address(text_type(cfg_ts['start_addr'])))
+ self.assertEqual(api_ts.end_addr,
+ ip_address(text_type(cfg_ts['end_addr'])))
+
+ def verify_responder(self, api_r, cfg_r):
+ self.assertEqual(api_r.sw_if_index, cfg_r['sw_if_index'])
+ self.assertEqual(api_r.addr, ip_address(cfg_r['addr']))
+
+ def verify_transforms(self, api_ts, cfg_ts):
+ self.assertEqual(api_ts.crypto_alg, cfg_ts['crypto_alg'])
+ self.assertEqual(api_ts.crypto_key_size, cfg_ts['crypto_key_size'])
+ self.assertEqual(api_ts.integ_alg, cfg_ts['integ_alg'])
+
+ def verify_ike_transforms(self, api_ts, cfg_ts):
+ self.verify_transforms(api_ts, cfg_ts)
+ self.assertEqual(api_ts.dh_group, cfg_ts['dh_group'])
+
+ def verify_esp_transforms(self, api_ts, cfg_ts):
+ self.verify_transforms(api_ts, cfg_ts)
+
+ def verify_auth(self, api_auth, cfg_auth):
+ self.assertEqual(api_auth.method, AuthMethod.value(cfg_auth['method']))
+ self.assertEqual(api_auth.data, cfg_auth['data'])
+ self.assertEqual(api_auth.data_len, len(cfg_auth['data']))
+
+ def verify_lifetime_data(self, p, ld):
+ self.assertEqual(p.lifetime, ld['lifetime'])
+ self.assertEqual(p.lifetime_maxdata, ld['lifetime_maxdata'])
+ self.assertEqual(p.lifetime_jitter, ld['lifetime_jitter'])
+ self.assertEqual(p.handover, ld['handover'])
+
+ def verify_profile(self, ap, cp):
+ self.assertEqual(ap.name, cp['name'])
+ self.assertEqual(ap.udp_encap, cp['udp_encap'])
+ self.verify_id(ap.loc_id, cp['loc_id'])
+ self.verify_id(ap.rem_id, cp['rem_id'])
+ self.verify_ts(ap.loc_ts, cp['loc_ts'])
+ self.verify_ts(ap.rem_ts, cp['rem_ts'])
+ self.verify_responder(ap.responder, cp['responder'])
+ self.verify_ike_transforms(ap.ike_ts, cp['ike_ts'])
+ self.verify_esp_transforms(ap.esp_ts, cp['esp_ts'])
+ self.verify_auth(ap.auth, cp['auth'])
+ natt_dis = False if 'natt_disabled' not in cp else cp['natt_disabled']
+ self.assertTrue(natt_dis == ap.natt_disabled)
+
+ if 'lifetime_data' in cp:
+ self.verify_lifetime_data(ap, cp['lifetime_data'])
+ self.assertEqual(ap.ipsec_over_udp_port, cp['ipsec_over_udp_port'])
+ if 'tun_itf' in cp:
+ self.assertEqual(ap.tun_itf, cp['tun_itf'])
+ else:
+ self.assertEqual(ap.tun_itf, 0xffffffff)
+
+
+@tag_fixme_vpp_workers
+class TestResponderBehindNAT(TemplateResponder, Ikev2Params):
+ """ test responder - responder behind NAT """
+
+ IKE_NODE_SUFFIX = 'ip4-natt'
+
+ def config_tc(self):
+ self.config_params({'r_natt': True})
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorNATT(TemplateInitiator, Ikev2Params):
+ """ test ikev2 initiator - NAT traversal (intitiator behind NAT) """
+
+ def config_tc(self):
+ self.config_params({
+ 'i_natt': True,
+ 'is_initiator': False, # seen from test case perspective
+ # thus vpp is initiator
+ 'responder': {'sw_if_index': self.pg0.sw_if_index,
+ 'addr': self.pg0.remote_ip4},
+ 'ike-crypto': ('AES-GCM-16ICV', 32),
+ 'ike-integ': 'NULL',
+ 'ike-dh': '3072MODPgr',
+ 'ike_transforms': {
+ 'crypto_alg': 20, # "aes-gcm-16"
+ 'crypto_key_size': 256,
+ 'dh_group': 15, # "modp-3072"
+ },
+ 'esp_transforms': {
+ 'crypto_alg': 12, # "aes-cbc"
+ 'crypto_key_size': 256,
+ # "hmac-sha2-256-128"
+ 'integ_alg': 12}})
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorPsk(TemplateInitiator, Ikev2Params):
+ """ test ikev2 initiator - pre shared key auth """
+
+ def config_tc(self):
+ self.config_params({
+ 'is_initiator': False, # seen from test case perspective
+ # thus vpp is initiator
+ 'ike-crypto': ('AES-GCM-16ICV', 32),
+ 'ike-integ': 'NULL',
+ 'ike-dh': '3072MODPgr',
+ 'ike_transforms': {
+ 'crypto_alg': 20, # "aes-gcm-16"
+ 'crypto_key_size': 256,
+ 'dh_group': 15, # "modp-3072"
+ },
+ 'esp_transforms': {
+ 'crypto_alg': 12, # "aes-cbc"
+ 'crypto_key_size': 256,
+ # "hmac-sha2-256-128"
+ 'integ_alg': 12},
+ 'responder_hostname': {'hostname': 'vpp.responder.org',
+ 'sw_if_index': self.pg0.sw_if_index}})
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorRequestWindowSize(TestInitiatorPsk):
+ """ test initiator - request window size (1) """
+
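+    # with a request window of size 1 only the reply matching the most
+    # recent outstanding rekey request may be accepted; the reply to the
+    # stale request must be ignored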
+ def rekey_respond(self, req, update_child_sa_data):
+ ih = self.get_ike_header(req)
+ plain = self.sa.hmac_and_decrypt(ih)
+ sa = ikev2.IKEv2_payload_SA(plain)
+ if update_child_sa_data:
+ prop = sa[ikev2.IKEv2_payload_Proposal]
+ self.sa.i_nonce = sa[ikev2.IKEv2_payload_Nonce].load
+ self.sa.r_nonce = self.sa.i_nonce
+ self.sa.child_sas[0].ispi = prop.SPI
+ self.sa.child_sas[0].rspi = prop.SPI
+ self.sa.calc_child_keys()
+
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ flags='Response', exch_type=36,
+ id=ih.id, next_payload='Encrypted')
+ resp = self.encrypt_ike_msg(header, sa, 'SA')
+ packet = self.create_packet(self.pg0, resp, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+ self.send_and_assert_no_replies(self.pg0, packet)
+
+ def test_initiator(self):
+ super(TestInitiatorRequestWindowSize, self).test_initiator()
+ self.pg0.enable_capture()
+ self.pg_start()
+ ispi = int.from_bytes(self.sa.child_sas[0].ispi, 'little')
+ self.vapi.ikev2_initiate_rekey_child_sa(ispi=ispi)
+ self.vapi.ikev2_initiate_rekey_child_sa(ispi=ispi)
+ capture = self.pg0.get_capture(2)
+
+ # reply in reverse order
+ self.rekey_respond(capture[1], True)
+ self.rekey_respond(capture[0], False)
+
+ # verify that only the second request was accepted
+ self.verify_ike_sas()
+ self.verify_ipsec_sas(is_rekey=True)
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorRekey(TestInitiatorPsk):
+ """ test ikev2 initiator - rekey """
+
+ def rekey_from_initiator(self):
+ ispi = int.from_bytes(self.sa.child_sas[0].ispi, 'little')
+ self.pg0.enable_capture()
+ self.pg_start()
+ self.vapi.ikev2_initiate_rekey_child_sa(ispi=ispi)
+ capture = self.pg0.get_capture(1)
+ ih = self.get_ike_header(capture[0])
+        self.assertEqual(ih.exch_type, 36)  # CREATE_CHILD_SA
+ self.assertNotIn('Response', ih.flags)
+ self.assertIn('Initiator', ih.flags)
+ plain = self.sa.hmac_and_decrypt(ih)
+ sa = ikev2.IKEv2_payload_SA(plain)
+ prop = sa[ikev2.IKEv2_payload_Proposal]
+ self.sa.i_nonce = sa[ikev2.IKEv2_payload_Nonce].load
+ self.sa.r_nonce = self.sa.i_nonce
+ # update new responder SPI
+ self.sa.child_sas[0].ispi = prop.SPI
+ self.sa.child_sas[0].rspi = prop.SPI
+ self.sa.calc_child_keys()
+ header = ikev2.IKEv2(init_SPI=self.sa.ispi, resp_SPI=self.sa.rspi,
+ flags='Response', exch_type=36,
+ id=ih.id, next_payload='Encrypted')
+ resp = self.encrypt_ike_msg(header, sa, 'SA')
+ packet = self.create_packet(self.pg0, resp, self.sa.sport,
+ self.sa.dport, self.sa.natt, self.ip6)
+ self.send_and_assert_no_replies(self.pg0, packet)
+
+ def test_initiator(self):
+ super(TestInitiatorRekey, self).test_initiator()
+ self.rekey_from_initiator()
+ self.verify_ike_sas()
+ self.verify_ipsec_sas(is_rekey=True)
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorDelSAFromResponder(TemplateInitiator, Ikev2Params):
+ """ test ikev2 initiator - delete IKE SA from responder """
+
+ def config_tc(self):
+ self.config_params({
+ 'del_sa_from_responder': True,
+ 'is_initiator': False, # seen from test case perspective
+ # thus vpp is initiator
+ 'responder': {'sw_if_index': self.pg0.sw_if_index,
+ 'addr': self.pg0.remote_ip4},
+ 'ike-crypto': ('AES-GCM-16ICV', 32),
+ 'ike-integ': 'NULL',
+ 'ike-dh': '3072MODPgr',
+ 'ike_transforms': {
+ 'crypto_alg': 20, # "aes-gcm-16"
+ 'crypto_key_size': 256,
+ 'dh_group': 15, # "modp-3072"
+ },
+ 'esp_transforms': {
+ 'crypto_alg': 12, # "aes-cbc"
+ 'crypto_key_size': 256,
+ # "hmac-sha2-256-128"
+ 'integ_alg': 12}})
+
+
+@tag_fixme_vpp_workers
+class TestResponderInitBehindNATT(TemplateResponder, Ikev2Params):
+ """ test ikev2 responder - initiator behind NAT """
+
+ IKE_NODE_SUFFIX = 'ip4-natt'
+
+ def config_tc(self):
+ self.config_params(
+ {'i_natt': True})
+
+
+@tag_fixme_vpp_workers
+class TestResponderPsk(TemplateResponder, Ikev2Params):
+ """ test ikev2 responder - pre shared key auth """
+ def config_tc(self):
+ self.config_params()
+
+
+@tag_fixme_vpp_workers
+class TestResponderDpd(TestResponderPsk):
+ """
+ Dead peer detection test
+ """
+ def config_tc(self):
+ self.config_params({'dpd_disabled': False})
+
+ def tearDown(self):
+ pass
+
+ def test_responder(self):
+ self.vapi.ikev2_profile_set_liveness(period=2, max_retries=1)
+ super(TestResponderDpd, self).test_responder()
+ self.pg0.enable_capture()
+ self.pg_start()
+ # capture empty request but don't reply
+ capture = self.pg0.get_capture(expected_count=1, timeout=5)
+ ih = self.get_ike_header(capture[0])
+ self.assertEqual(ih.exch_type, 37) # INFORMATIONAL
+ plain = self.sa.hmac_and_decrypt(ih)
+ self.assertEqual(plain, b'')
+ # wait for SA expiration
+ time.sleep(3)
+ ike_sas = self.vapi.ikev2_sa_dump()
+ self.assertEqual(len(ike_sas), 0)
+ ipsec_sas = self.vapi.ipsec_sa_dump()
+ self.assertEqual(len(ipsec_sas), 0)
+
+
+@tag_fixme_vpp_workers
+class TestResponderRekey(TestResponderPsk):
+ """ test ikev2 responder - rekey """
+
+ def rekey_from_initiator(self):
+ packet = self.create_rekey_request()
+ self.pg0.add_stream(packet)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ ih = self.get_ike_header(capture[0])
+ plain = self.sa.hmac_and_decrypt(ih)
+ sa = ikev2.IKEv2_payload_SA(plain)
+ prop = sa[ikev2.IKEv2_payload_Proposal]
+ self.sa.r_nonce = sa[ikev2.IKEv2_payload_Nonce].load
+ # update new responder SPI
+ self.sa.child_sas[0].rspi = prop.SPI
+
+ def test_responder(self):
+ super(TestResponderRekey, self).test_responder()
+ self.rekey_from_initiator()
+ self.sa.calc_child_keys()
+ self.verify_ike_sas()
+ self.verify_ipsec_sas(is_rekey=True)
+ self.assert_counter(1, 'rekey_req', 'ip4')
+ r = self.vapi.ikev2_sa_dump()
+ self.assertEqual(r[0].sa.stats.n_rekey_req, 1)
+
+
+class TestResponderVrf(TestResponderPsk, Ikev2Params):
+ """ test ikev2 responder - non-default table id """
+
+ @classmethod
+ def setUpClass(cls):
+ import scapy.contrib.ikev2 as _ikev2
+ globals()['ikev2'] = _ikev2
+ super(IkePeer, cls).setUpClass()
+ cls.create_pg_interfaces(range(1))
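+        # move pg0 into a non-default FIB table before assigning addresses
+        # so the responder is exercised in that table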
+ cls.vapi.cli("ip table add 1")
+ cls.vapi.cli("set interface ip table pg0 1")
+ for i in cls.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def config_tc(self):
+ self.config_params({'dpd_disabled': False})
+
+ def test_responder(self):
+ self.vapi.ikev2_profile_set_liveness(period=2, max_retries=1)
+ super(TestResponderVrf, self).test_responder()
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(expected_count=1, timeout=5)
+ ih = self.get_ike_header(capture[0])
+ self.assertEqual(ih.exch_type, 37) # INFORMATIONAL
+ plain = self.sa.hmac_and_decrypt(ih)
+ self.assertEqual(plain, b'')
+
+
+@tag_fixme_vpp_workers
+class TestResponderRsaSign(TemplateResponder, Ikev2Params):
+ """ test ikev2 responder - cert based auth """
+ def config_tc(self):
+ self.config_params({
+ 'udp_encap': True,
+ 'auth': 'rsa-sig',
+ 'server-key': 'server-key.pem',
+ 'client-key': 'client-key.pem',
+ 'client-cert': 'client-cert.pem',
+ 'server-cert': 'server-cert.pem'})
+
+
+@tag_fixme_vpp_workers
+class Test_IKE_AES_CBC_128_SHA256_128_MODP2048_ESP_AES_CBC_192_SHA_384_192\
+ (TemplateResponder, Ikev2Params):
+ """
+ IKE:AES_CBC_128_SHA256_128,DH=modp2048 ESP:AES_CBC_192_SHA_384_192
+ """
+ def config_tc(self):
+ self.config_params({
+ 'ike-crypto': ('AES-CBC', 16),
+ 'ike-integ': 'SHA2-256-128',
+ 'esp-crypto': ('AES-CBC', 24),
+ 'esp-integ': 'SHA2-384-192',
+ 'ike-dh': '2048MODPgr'})
+
+
+@tag_fixme_vpp_workers
+class TestAES_CBC_128_SHA256_128_MODP3072_ESP_AES_GCM_16\
+ (TemplateResponder, Ikev2Params):
+
+ """
+ IKE:AES_CBC_128_SHA256_128,DH=modp3072 ESP:AES_GCM_16
+ """
+ def config_tc(self):
+ self.config_params({
+ 'ike-crypto': ('AES-CBC', 32),
+ 'ike-integ': 'SHA2-256-128',
+ 'esp-crypto': ('AES-GCM-16ICV', 32),
+ 'esp-integ': 'NULL',
+ 'ike-dh': '3072MODPgr'})
+
+
+@tag_fixme_vpp_workers
+class Test_IKE_AES_GCM_16_256(TemplateResponder, Ikev2Params):
+ """
+ IKE:AES_GCM_16_256
+ """
+
+ IKE_NODE_SUFFIX = 'ip6'
+
+ def config_tc(self):
+ self.config_params({
+ 'del_sa_from_responder': True,
+ 'ip6': True,
+ 'natt': True,
+ 'ike-crypto': ('AES-GCM-16ICV', 32),
+ 'ike-integ': 'NULL',
+ 'ike-dh': '2048MODPgr',
+ 'loc_ts': {'start_addr': 'ab:cd::0',
+ 'end_addr': 'ab:cd::10'},
+ 'rem_ts': {'start_addr': '11::0',
+ 'end_addr': '11::100'}})
+
+
+@tag_fixme_vpp_workers
+class TestInitiatorKeepaliveMsg(TestInitiatorPsk):
+ """
+ Test for keep alive messages
+ """
+
+ def send_empty_req_from_responder(self):
+ packet = self.create_empty_request()
+ self.pg0.add_stream(packet)
+ self.pg0.enable_capture()
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ ih = self.get_ike_header(capture[0])
+ self.assertEqual(ih.id, self.sa.msg_id)
+ plain = self.sa.hmac_and_decrypt(ih)
+ self.assertEqual(plain, b'')
+ self.assert_counter(1, 'keepalive', 'ip4')
+ r = self.vapi.ikev2_sa_dump()
+ self.assertEqual(1, r[0].sa.stats.n_keepalives)
+
+ def test_initiator(self):
+ super(TestInitiatorKeepaliveMsg, self).test_initiator()
+ self.send_empty_req_from_responder()
+
+
+class TestMalformedMessages(TemplateResponder, Ikev2Params):
+ """ malformed packet test """
+
+ def tearDown(self):
+ pass
+
+ def config_tc(self):
+ self.config_params()
+
+ def create_ike_init_msg(self, length=None, payload=None):
+ msg = ikev2.IKEv2(length=length, init_SPI='\x11' * 8,
+ flags='Initiator', exch_type='IKE_SA_INIT')
+ if payload is not None:
+ msg /= payload
+ return self.create_packet(self.pg0, msg, self.sa.sport,
+ self.sa.dport)
+
+ def verify_bad_packet_length(self):
+ ike_msg = self.create_ike_init_msg(length=0xdead)
+ self.send_and_assert_no_replies(self.pg0, ike_msg * self.pkt_count)
+ self.assert_counter(self.pkt_count, 'bad_length')
+
+ def verify_bad_sa_payload_length(self):
+ p = ikev2.IKEv2_payload_SA(length=0xdead)
+ ike_msg = self.create_ike_init_msg(payload=p)
+ self.send_and_assert_no_replies(self.pg0, ike_msg * self.pkt_count)
+ self.assert_counter(self.pkt_count, 'malformed_packet')
+
+ def test_responder(self):
+ self.pkt_count = 254
+ self.verify_bad_packet_length()
+ self.verify_bad_sa_payload_length()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ipsec_nat.py b/test/test_ipsec_nat.py
new file mode 100644
index 00000000000..dcedf64b52d
--- /dev/null
+++ b/test/test_ipsec_nat.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python3
+
+import socket
+
+import scapy.compat
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import ICMP, IP, TCP, UDP
+from scapy.layers.ipsec import SecurityAssociation, ESP
+
+from util import ppp, ppc
+from template_ipsec import TemplateIpsec
+from vpp_ipsec import VppIpsecSA, VppIpsecSpd, VppIpsecSpdEntry,\
+ VppIpsecSpdItfBinding
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import DpoProto
+from vpp_papi import VppEnum
+
+
+class IPSecNATTestCase(TemplateIpsec):
+ """ IPSec/NAT
+ TUNNEL MODE:
+
+
+ public network | private network
+ --- encrypt --- plain ---
+ |pg0| <------- |VPP| <------ |pg1|
+ --- --- ---
+
+ --- decrypt --- plain ---
+ |pg0| -------> |VPP| ------> |pg1|
+ --- --- ---
+ """
+
+ tcp_port_in = 6303
+ tcp_port_out = 6303
+ udp_port_in = 6304
+ udp_port_out = 6304
+ icmp_id_in = 6305
+ icmp_id_out = 6305
+
+ @classmethod
+ def setUpClass(cls):
+ super(IPSecNATTestCase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(IPSecNATTestCase, cls).tearDownClass()
+
+ def setUp(self):
+ super(IPSecNATTestCase, self).setUp()
+ self.tun_if = self.pg0
+
+ self.tun_spd = VppIpsecSpd(self, self.tun_spd_id)
+ self.tun_spd.add_vpp_config()
+ VppIpsecSpdItfBinding(self, self.tun_spd,
+ self.tun_if).add_vpp_config()
+
+ p = self.ipv4_params
+ self.config_esp_tun(p)
+ self.logger.info(self.vapi.ppcli("show ipsec all"))
+
+ d = DpoProto.DPO_PROTO_IP6 if p.is_ipv6 else DpoProto.DPO_PROTO_IP4
+ VppIpRoute(self, p.remote_tun_if_host, p.addr_len,
+ [VppRoutePath(self.tun_if.remote_addr[p.addr_type],
+ 0xffffffff,
+ proto=d)]).add_vpp_config()
+
+ def tearDown(self):
+ super(IPSecNATTestCase, self).tearDown()
+
+ def create_stream_plain(self, src_mac, dst_mac, src_ip, dst_ip):
+ return [
+ # TCP
+ Ether(src=src_mac, dst=dst_mac) /
+ IP(src=src_ip, dst=dst_ip) /
+ TCP(sport=self.tcp_port_in, dport=20),
+ # UDP
+ Ether(src=src_mac, dst=dst_mac) /
+ IP(src=src_ip, dst=dst_ip) /
+ UDP(sport=self.udp_port_in, dport=20),
+ # ICMP
+ Ether(src=src_mac, dst=dst_mac) /
+ IP(src=src_ip, dst=dst_ip) /
+ ICMP(id=self.icmp_id_in, type='echo-request')
+ ]
+
+ def create_stream_encrypted(self, src_mac, dst_mac, src_ip, dst_ip, sa):
+ return [
+ # TCP
+ Ether(src=src_mac, dst=dst_mac) /
+ sa.encrypt(IP(src=src_ip, dst=dst_ip) /
+ TCP(dport=self.tcp_port_out, sport=20)),
+ # UDP
+ Ether(src=src_mac, dst=dst_mac) /
+ sa.encrypt(IP(src=src_ip, dst=dst_ip) /
+ UDP(dport=self.udp_port_out, sport=20)),
+ # ICMP
+ Ether(src=src_mac, dst=dst_mac) /
+ sa.encrypt(IP(src=src_ip, dst=dst_ip) /
+ ICMP(id=self.icmp_id_out, type='echo-request'))
+ ]
+
+ def verify_capture_plain(self, capture):
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assert_equal(packet[IP].src, self.tun_if.remote_ip4,
+ "decrypted packet source address")
+ self.assert_equal(packet[IP].dst, self.pg1.remote_ip4,
+ "decrypted packet destination address")
+ if packet.haslayer(TCP):
+ self.assertFalse(
+ packet.haslayer(UDP),
+ "unexpected UDP header in decrypted packet")
+ self.assert_equal(packet[TCP].dport, self.tcp_port_in,
+ "decrypted packet TCP destination port")
+ elif packet.haslayer(UDP):
+ if packet[UDP].payload:
+ self.assertFalse(
+ packet[UDP][1].haslayer(UDP),
+ "unexpected UDP header in decrypted packet")
+ self.assert_equal(packet[UDP].dport, self.udp_port_in,
+ "decrypted packet UDP destination port")
+ else:
+ self.assertFalse(
+ packet.haslayer(UDP),
+ "unexpected UDP header in decrypted packet")
+ self.assert_equal(packet[ICMP].id, self.icmp_id_in,
+ "decrypted packet ICMP ID")
+ except Exception:
+ self.logger.error(
+ ppp("Unexpected or invalid plain packet:", packet))
+ raise
+
+ def verify_capture_encrypted(self, capture, sa):
+ for packet in capture:
+ try:
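+                # rebuild the packet so scapy recomputes the UDP length
+                # and compare it with the received one to verify VPP's
+                # NAT-T UDP encapsulation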
+ copy = packet.__class__(scapy.compat.raw(packet))
+ del copy[UDP].len
+ copy = packet.__class__(scapy.compat.raw(copy))
+ self.assert_equal(packet[UDP].len, copy[UDP].len,
+ "UDP header length")
+ self.assert_packet_checksums_valid(packet)
+ self.assertIn(ESP, packet[IP])
+ decrypt_pkt = sa.decrypt(packet[IP])
+ self.assert_packet_checksums_valid(decrypt_pkt)
+ self.assert_equal(decrypt_pkt[IP].src, self.pg1.remote_ip4,
+ "encrypted packet source address")
+ self.assert_equal(decrypt_pkt[IP].dst, self.tun_if.remote_ip4,
+ "encrypted packet destination address")
+ except Exception:
+ self.logger.error(
+ ppp("Unexpected or invalid encrypted packet:", packet))
+ raise
+
+ def config_esp_tun(self, params):
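+        # one SA per direction plus SPD entries matching ESP and NAT-T
+        # (UDP/4500) traffic and two PROTECT entries (priority 10) for
+        # the plain traffic between pg1 and the tunnel peer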
+ addr_type = params.addr_type
+ scapy_tun_sa_id = params.scapy_tun_sa_id
+ scapy_tun_spi = params.scapy_tun_spi
+ vpp_tun_sa_id = params.vpp_tun_sa_id
+ vpp_tun_spi = params.vpp_tun_spi
+ auth_algo_vpp_id = params.auth_algo_vpp_id
+ auth_key = params.auth_key
+ crypt_algo_vpp_id = params.crypt_algo_vpp_id
+ crypt_key = params.crypt_key
+ addr_any = params.addr_any
+ addr_bcast = params.addr_bcast
+ flags = (VppEnum.vl_api_ipsec_sad_flags_t.
+ IPSEC_API_SAD_FLAG_UDP_ENCAP)
+ e = VppEnum.vl_api_ipsec_spd_action_t
+
+ VppIpsecSA(self, scapy_tun_sa_id, scapy_tun_spi,
+ auth_algo_vpp_id, auth_key,
+ crypt_algo_vpp_id, crypt_key,
+ self.vpp_esp_protocol,
+ self.pg1.remote_addr[addr_type],
+ self.tun_if.remote_addr[addr_type],
+ flags=flags).add_vpp_config()
+ VppIpsecSA(self, vpp_tun_sa_id, vpp_tun_spi,
+ auth_algo_vpp_id, auth_key,
+ crypt_algo_vpp_id, crypt_key,
+ self.vpp_esp_protocol,
+ self.tun_if.remote_addr[addr_type],
+ self.pg1.remote_addr[addr_type],
+ flags=flags).add_vpp_config()
+
+ VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
+ addr_any, addr_bcast,
+ addr_any, addr_bcast,
+ socket.IPPROTO_ESP).add_vpp_config()
+ VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
+ addr_any, addr_bcast,
+ addr_any, addr_bcast,
+ socket.IPPROTO_ESP,
+ is_outbound=0).add_vpp_config()
+ VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
+ addr_any, addr_bcast,
+ addr_any, addr_bcast,
+ socket.IPPROTO_UDP,
+ remote_port_start=4500,
+ remote_port_stop=4500).add_vpp_config()
+ VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
+ addr_any, addr_bcast,
+ addr_any, addr_bcast,
+ socket.IPPROTO_UDP,
+ remote_port_start=4500,
+ remote_port_stop=4500,
+ is_outbound=0).add_vpp_config()
+ VppIpsecSpdEntry(self, self.tun_spd, vpp_tun_sa_id,
+ self.tun_if.remote_addr[addr_type],
+ self.tun_if.remote_addr[addr_type],
+ self.pg1.remote_addr[addr_type],
+ self.pg1.remote_addr[addr_type],
+ 0, priority=10,
+ policy=e.IPSEC_API_SPD_ACTION_PROTECT,
+ is_outbound=0).add_vpp_config()
+ VppIpsecSpdEntry(self, self.tun_spd, scapy_tun_sa_id,
+ self.pg1.remote_addr[addr_type],
+ self.pg1.remote_addr[addr_type],
+ self.tun_if.remote_addr[addr_type],
+ self.tun_if.remote_addr[addr_type],
+ 0, policy=e.IPSEC_API_SPD_ACTION_PROTECT,
+ priority=10).add_vpp_config()
+
+ def test_ipsec_nat_tun(self):
+ """ IPSec/NAT tunnel test case """
+ p = self.ipv4_params
+ scapy_tun_sa = SecurityAssociation(ESP, spi=p.scapy_tun_spi,
+ crypt_algo=p.crypt_algo,
+ crypt_key=p.crypt_key,
+ auth_algo=p.auth_algo,
+ auth_key=p.auth_key,
+ tunnel_header=IP(
+ src=self.pg1.remote_ip4,
+ dst=self.tun_if.remote_ip4),
+ nat_t_header=UDP(
+ sport=4500,
+ dport=4500))
+ # in2out - from private network to public
+ pkts = self.create_stream_plain(
+ self.pg1.remote_mac, self.pg1.local_mac,
+ self.pg1.remote_ip4, self.tun_if.remote_ip4)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.tun_if.get_capture(len(pkts))
+ self.verify_capture_encrypted(capture, scapy_tun_sa)
+
+ vpp_tun_sa = SecurityAssociation(ESP,
+ spi=p.vpp_tun_spi,
+ crypt_algo=p.crypt_algo,
+ crypt_key=p.crypt_key,
+ auth_algo=p.auth_algo,
+ auth_key=p.auth_key,
+ tunnel_header=IP(
+ src=self.tun_if.remote_ip4,
+ dst=self.pg1.remote_ip4),
+ nat_t_header=UDP(
+ sport=4500,
+ dport=4500))
+
+ # out2in - from public network to private
+ pkts = self.create_stream_encrypted(
+ self.tun_if.remote_mac, self.tun_if.local_mac,
+ self.tun_if.remote_ip4, self.pg1.remote_ip4, vpp_tun_sa)
+ self.logger.info(ppc("Sending packets:", pkts))
+ self.tun_if.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_plain(capture)
diff --git a/test/test_l2tp.py b/test/test_l2tp.py
new file mode 100644
index 00000000000..5a665238260
--- /dev/null
+++ b/test/test_l2tp.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from scapy.layers.l2 import Ether
+from scapy.layers.inet6 import IPv6
+
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase
+
+
+@tag_fixme_vpp_workers
+class TestL2tp(VppTestCase):
+ """ L2TP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestL2tp, cls).setUpClass()
+
+ cls.create_pg_interfaces(range(1))
+ cls.pg0.admin_up()
+ cls.pg0.config_ip6()
+
+ def test_l2tp_decap_local(self):
+ """ L2TP don't accept packets unless configured """
+
+ pkt = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6, nh=115))
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+
+ # l2tp should not accept packets
+ err = self.statistics.get_counter(
+ '/err/l2tp-decap-local/l2tpv3 session not found')[0]
+ self.assertEqual(err, 0)
+ err_count = err
+
+ self.vapi.l2tpv3_create_tunnel(client_address=self.pg0.local_ip6,
+ our_address=self.pg0.remote_ip6)
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+
+        # with a tunnel configured the packet reaches the l2tp node, but
+        # no session matches, so the counter increments
+ err = self.statistics.get_counter(
+ '/err/l2tp-decap-local/l2tpv3 session not found')[0]
+ self.assertEqual(err, 1)
+ err_count = err
diff --git a/test/test_l3xc.py b/test/test_l3xc.py
new file mode 100644
index 00000000000..d7a82976cf5
--- /dev/null
+++ b/test/test_l3xc.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python3
+
+from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsLabel, VppIpTable
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+
+from vpp_object import VppObject
+
+NUM_PKTS = 67
+
+
+def find_l3xc(test, sw_if_index, dump_sw_if_index=None):
+ if not dump_sw_if_index:
+ dump_sw_if_index = sw_if_index
+ xcs = test.vapi.l3xc_dump(dump_sw_if_index)
+ for xc in xcs:
+ if sw_if_index == xc.l3xc.sw_if_index:
+ return True
+ return False
+
+
+class VppL3xc(VppObject):
+
+ def __init__(self, test, intf, paths, is_ip6=False):
+ self._test = test
+ self.intf = intf
+ self.is_ip6 = is_ip6
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def add_vpp_config(self):
+ self._test.vapi.l3xc_update(
+ l3xc={
+ 'is_ip6': self.is_ip6,
+ 'sw_if_index': self.intf.sw_if_index,
+ 'n_paths': len(self.paths),
+ 'paths': self.encoded_paths
+ })
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.l3xc_del(
+ is_ip6=self.is_ip6,
+ sw_if_index=self.intf.sw_if_index)
+
+ def query_vpp_config(self):
+ return find_l3xc(self._test, self.intf.sw_if_index)
+
+ def object_id(self):
+ return ("l3xc-%d" % self.intf.sw_if_index)
+
+
+class TestL3xc(VppTestCase):
+ """ L3XC Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestL3xc, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestL3xc, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestL3xc, self).setUp()
+
+ self.create_pg_interfaces(range(6))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestL3xc, self).tearDown()
+
+ def send_and_expect_load_balancing(self, input, pkts, outputs):
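+        # send pkts on 'input' and check that every interface in 'outputs'
+        # received at least one packet, i.e. the flows were spread across
+        # all configured paths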
+ self.pg_send(input, pkts)
+ rxs = []
+ for oo in outputs:
+ rx = oo._get_capture(1)
+ self.assertNotEqual(0, len(rx))
+ for r in rx:
+ rxs.append(r)
+ return rxs
+
+ def test_l3xc4(self):
+ """ IPv4 X-Connect """
+
+ #
+ # x-connect pg0 to pg1 and pg2 to pg3->5
+ #
+ l3xc_1 = VppL3xc(self, self.pg0,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ l3xc_1.add_vpp_config()
+ l3xc_2 = VppL3xc(self, self.pg2,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index),
+ VppRoutePath(self.pg4.remote_ip4,
+ self.pg4.sw_if_index),
+ VppRoutePath(self.pg5.remote_ip4,
+ self.pg5.sw_if_index)])
+ l3xc_2.add_vpp_config()
+
+ self.assertTrue(find_l3xc(self, self.pg2.sw_if_index, 0xffffffff))
+
+ self.logger.info(self.vapi.cli("sh l3xc"))
+
+ #
+ # fire in packets. If it's forwarded then the L3XC was successful,
+ # since default routing will drop it
+ #
+ p_1 = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="1.1.1.1", dst="1.1.1.2") /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ # self.send_and_expect(self.pg0, p_1*NUM_PKTS, self.pg1)
+
+ p_2 = []
+ for ii in range(NUM_PKTS):
+ p_2.append(Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src="1.1.1.1", dst="1.1.1.2") /
+ UDP(sport=1000 + ii, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect_load_balancing(self.pg2, p_2,
+ [self.pg3, self.pg4, self.pg5])
+
+ l3xc_2.remove_vpp_config()
+ self.send_and_assert_no_replies(self.pg2, p_2)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_lacp.py b/test/test_lacp.py
new file mode 100644
index 00000000000..b5f2dae2cd3
--- /dev/null
+++ b/test/test_lacp.py
@@ -0,0 +1,364 @@
+#!/usr/bin/env python3
+
+import time
+import unittest
+
+from scapy.contrib.lacp import LACP, SlowProtocol, MarkerProtocol
+from scapy.layers.l2 import Ether
+
+from framework import VppTestCase, VppTestRunner
+from vpp_memif import remove_all_memif_vpp_config, VppSocketFilename, VppMemif
+from vpp_bond_interface import VppBondInterface
+from vpp_papi import VppEnum, MACAddress
+
+bond_mac = "02:02:02:02:02:02"
+lacp_dst_mac = '01:80:c2:00:00:02'
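+# 63 == 0x3f: activity, timeout, aggregation, synchronization, collecting
+# and distributing actor/partner state bits all set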
+LACP_COLLECTION_AND_DISTRIBUTION_STATE = 63
+
+
+class TestMarker(VppTestCase):
+ """LACP Marker Protocol Test Case
+
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ # Test variables
+ cls.pkts_per_burst = 257 # Number of packets per burst
+        # create 1 pg interface
+ cls.create_pg_interfaces(range(1))
+
+ # packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518] # , 9018]
+
+ # setup all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+
+ def setUp(self):
+ super().setUp()
+
+ def tearDown(self):
+ super().tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.ppcli("show interface"))
+
+ def test_marker_request(self):
+ """ Marker Request test """
+
+ # topology
+ #
+ # +-+ +-+
+ # memif1 -----|B| |B|---- memif11
+ # |o| |o|
+ # |n|------|n|
+ # |d| |d|
+ # pg0 -----|0| |1|
+ # +-+ +-+
+
+ socket1 = VppSocketFilename(
+ self,
+ socket_id=1,
+ socket_filename="%s/memif.sock1" % self.tempdir)
+ socket1.add_vpp_config()
+
+ socket11 = VppSocketFilename(
+ self,
+ socket_id=2,
+ socket_filename="%s/memif.sock1" % self.tempdir)
+ socket11.add_vpp_config()
+
+ memif1 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=1)
+ memif1.add_vpp_config()
+ memif1.admin_up()
+
+ memif11 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=2)
+ memif11.add_vpp_config()
+ memif11.admin_up()
+
+ bond0 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP,
+ use_custom_mac=1,
+ mac_address=bond_mac)
+
+ bond0.add_vpp_config()
+ bond0.admin_up()
+
+ bond1 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP)
+ bond1.add_vpp_config()
+ bond1.admin_up()
+
+ bond0.add_member_vpp_bond_interface(sw_if_index=memif1.sw_if_index)
+ bond1.add_member_vpp_bond_interface(sw_if_index=memif11.sw_if_index)
+
+ # wait for memif protocol exchange and hardware carrier to come up
+ self.assertEqual(memif1.wait_for_link_up(10), True)
+ self.assertEqual(memif11.wait_for_link_up(10), True)
+
+ # verify memif1 in bond0
+ intfs = self.vapi.sw_member_interface_dump(
+ sw_if_index=bond0.sw_if_index)
+ for intf in intfs:
+ self.assertEqual(intf.sw_if_index, memif1.sw_if_index)
+
+ # verify memif11 in bond1
+ intfs = self.vapi.sw_member_interface_dump(
+ sw_if_index=bond1.sw_if_index)
+ for intf in intfs:
+ self.assertEqual(intf.sw_if_index, memif11.sw_if_index)
+
+ self.vapi.ppcli("trace add memif-input 100")
+
+ # create marker request
+ marker = (Ether(src=bond_mac, dst=lacp_dst_mac) /
+ SlowProtocol() /
+ MarkerProtocol(marker_type=1,
+ requester_port=1,
+ requester_system=bond_mac,
+ requester_transaction_id=1))
+
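+        # attach pg0 to bond1 and inject the marker request; handling is
+        # verified via the packet trace below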
+ bond1.add_member_vpp_bond_interface(sw_if_index=self.pg0.sw_if_index)
+ self.pg0.add_stream(marker)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ show_trace = self.vapi.ppcli("show trace max 100")
+ self.assertIn("Marker Information TLV:", show_trace)
+
+ bond0.remove_vpp_config()
+ bond1.remove_vpp_config()
+
+
+class TestLACP(VppTestCase):
+ """LACP Test Case
+
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+
+ def setUp(self):
+ super().setUp()
+
+ def tearDown(self):
+ super().tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.ppcli("show interface"))
+
+ def wait_for_lacp_connect(self, timeout, step=1):
+ while 1:
+ intfs = self.vapi.sw_interface_lacp_dump()
+ all_good = 1
+ for intf in intfs:
+ if ((intf.actor_state !=
+ LACP_COLLECTION_AND_DISTRIBUTION_STATE) or
+ (intf.partner_state !=
+ LACP_COLLECTION_AND_DISTRIBUTION_STATE)):
+ all_good = 0
+ if (all_good == 1):
+ return 1
+ self.sleep(step)
+ timeout -= step
+ if timeout <= 0:
+ return 0
+
+ def wait_for_member_detach(self, bond, timeout, count, step=1):
+ while 1:
+ intfs = self.vapi.sw_bond_interface_dump(
+ sw_if_index=bond.sw_if_index)
+ for intf in intfs:
+ if ((intf.members == count) and
+ (intf.active_members == count)):
+ return 1
+ else:
+ self.sleep(1)
+ timeout -= step
+            if (timeout <= 0):
+ return 0
+
+ def test_lacp_connect(self):
+ """ LACP protocol connect test """
+
+ # topology
+ #
+ # +-+ +-+
+ # memif1 -----|B| |B|---- memif11
+ # |o| |o|
+ # |n|------|n|
+ # |d| |d|
+ # memif2 -----|0| |1|---- memif12
+ # +-+ +-+
+
+ socket1 = VppSocketFilename(
+ self,
+ socket_id=1,
+ socket_filename="%s/memif.sock1" % self.tempdir)
+ socket1.add_vpp_config()
+
+ socket11 = VppSocketFilename(
+ self,
+ socket_id=2,
+ socket_filename="%s/memif.sock1" % self.tempdir)
+ socket11.add_vpp_config()
+
+ socket2 = VppSocketFilename(
+ self,
+ socket_id=3,
+ socket_filename="%s/memif.sock2" % self.tempdir)
+ socket2.add_vpp_config()
+
+ socket22 = VppSocketFilename(
+ self,
+ socket_id=4,
+ socket_filename="%s/memif.sock2" % self.tempdir)
+ socket22.add_vpp_config()
+
+ memif1 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=1)
+ memif1.add_vpp_config()
+ memif1.admin_up()
+
+ memif11 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=2)
+ memif11.add_vpp_config()
+ memif11.admin_up()
+
+ memif2 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=3)
+ memif2.add_vpp_config()
+ memif2.admin_up()
+
+ memif12 = VppMemif(
+ self,
+ role=VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ mode=VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=4)
+ memif12.add_vpp_config()
+ memif12.admin_up()
+
+ self.logger.info(self.vapi.ppcli("debug lacp on"))
+ bond0 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP,
+ use_custom_mac=1,
+ mac_address=bond_mac)
+
+ bond0.add_vpp_config()
+ bond0.admin_up()
+
+ bond1 = VppBondInterface(
+ self,
+ mode=VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP)
+ bond1.add_vpp_config()
+ bond1.admin_up()
+
+ # add member memif1 and memif2 to bond0
+ bond0.add_member_vpp_bond_interface(sw_if_index=memif1.sw_if_index)
+ bond0.add_member_vpp_bond_interface(sw_if_index=memif2.sw_if_index)
+
+ # add member memif11 and memif12 to bond1
+ bond1.add_member_vpp_bond_interface(sw_if_index=memif11.sw_if_index)
+ bond1.add_member_vpp_bond_interface(sw_if_index=memif12.sw_if_index)
+
+ # wait for memif protocol exchange and hardware carrier to come up
+ self.assertEqual(memif1.wait_for_link_up(10), True)
+ self.assertEqual(memif2.wait_for_link_up(10), True)
+ self.assertEqual(memif11.wait_for_link_up(10), True)
+ self.assertEqual(memif12.wait_for_link_up(10), True)
+
+ # verify memif1 and memif2 in bond0
+ intfs = self.vapi.sw_member_interface_dump(
+ sw_if_index=bond0.sw_if_index)
+ for intf in intfs:
+ self.assertIn(
+ intf.sw_if_index, (memif1.sw_if_index, memif2.sw_if_index))
+
+ # verify memif11 and memif12 in bond1
+ intfs = self.vapi.sw_member_interface_dump(
+ sw_if_index=bond1.sw_if_index)
+ for intf in intfs:
+ self.assertIn(
+ intf.sw_if_index, (memif11.sw_if_index, memif12.sw_if_index))
+ self.assertEqual(intf.is_long_timeout, 0)
+ self.assertEqual(intf.is_passive, 0)
+
+ # Let LACP create the bundle
+ self.wait_for_lacp_connect(30)
+
+ intfs = self.vapi.sw_interface_lacp_dump()
+ for intf in intfs:
+ self.assertEqual(
+ intf.actor_state, LACP_COLLECTION_AND_DISTRIBUTION_STATE)
+ self.assertEqual(
+ intf.partner_state, LACP_COLLECTION_AND_DISTRIBUTION_STATE)
+
+ intfs = self.vapi.sw_bond_interface_dump(sw_if_index=0xFFFFFFFF)
+ for intf in intfs:
+ self.assertEqual(intf.members, 2)
+ self.assertEqual(intf.active_members, 2)
+ self.assertEqual(
+ intf.mode, VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP)
+
+ self.logger.info(self.vapi.ppcli("show lacp"))
+ self.logger.info(self.vapi.ppcli("show lacp details"))
+
+ # detach member memif1
+ bond0.detach_vpp_bond_interface(sw_if_index=memif1.sw_if_index)
+
+ self.wait_for_member_detach(bond0, timeout=10, count=1)
+ intfs = self.vapi.sw_bond_interface_dump(
+ sw_if_index=bond0.sw_if_index)
+ for intf in intfs:
+ self.assertEqual(intf.members, 1)
+ self.assertEqual(intf.active_members, 1)
+ self.assertEqual(
+ intf.mode, VppEnum.vl_api_bond_mode_t.BOND_API_MODE_LACP)
+
+ # detach member memif2
+ bond0.detach_vpp_bond_interface(sw_if_index=memif2.sw_if_index)
+ self.wait_for_member_detach(bond0, timeout=10, count=0)
+
+ intfs = self.vapi.sw_bond_interface_dump(
+ sw_if_index=bond0.sw_if_index)
+ for intf in intfs:
+ self.assertEqual(intf.members, 0)
+ self.assertEqual(intf.active_members, 0)
+
+ bond0.remove_vpp_config()
+ bond1.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_lb.py b/test/test_lb.py
new file mode 100644
index 00000000000..fafb87b62d9
--- /dev/null
+++ b/test/test_lb.py
@@ -0,0 +1,502 @@
+import socket
+
+import scapy.compat
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import Ether, GRE
+from scapy.packet import Raw
+from scapy.data import IP_PROTOS
+
+from framework import VppTestCase
+from util import ppp
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+""" TestLB is a subclass of VPPTestCase classes.
+
+ TestLB class defines Load Balancer test cases for:
+ - IP4 to GRE4 encap on per-port vip case
+ - IP4 to GRE6 encap on per-port vip case
+ - IP6 to GRE4 encap on per-port vip case
+ - IP6 to GRE6 encap on per-port vip case
+ - IP4 to L3DSR encap on vip case
+ - IP4 to L3DSR encap on per-port vip case
+ - IP4 to NAT4 encap on per-port vip case
+ - IP6 to NAT6 encap on per-port vip case
+
+ As stated in comments below, GRE has issues with IPv6.
+ All test cases involving IPv6 are executed, but
+ received packets are not parsed and checked.
+
+"""
+
+
+class TestLB(VppTestCase):
+ """ Load Balancer Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestLB, cls).setUpClass()
+
+ cls.ass = range(5)
+ cls.packets = range(1)
+
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+
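+            # routes towards the application servers' networks; with
+            # register=False this class-level config stays out of the
+            # per-test object registry, so it persists across test cases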
+ dst4 = VppIpRoute(cls, "10.0.0.0", 24,
+ [VppRoutePath(cls.pg1.remote_ip4,
+ INVALID_INDEX)],
+ register=False)
+ dst4.add_vpp_config()
+ dst6 = VppIpRoute(cls, "2002::", 16,
+ [VppRoutePath(cls.pg1.remote_ip6,
+ INVALID_INDEX)],
+ register=False)
+ dst6.add_vpp_config()
+ cls.vapi.lb_conf(ip4_src_address="39.40.41.42",
+ ip6_src_address="2004::1")
+ except Exception:
+ super(TestLB, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestLB, cls).tearDownClass()
+
+ def tearDown(self):
+ super(TestLB, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show lb vip verbose"))
+
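+    # each flow id yields a distinct address/port tuple, letting the LB
+    # spread flows across the configured application servers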
+    def getIPv4Flow(self, id):
+        return (IP(dst="90.0.%u.%u" % (id // 255, id % 255),
+                   src="40.0.%u.%u" % (id // 255, id % 255)) /
+                UDP(sport=10000 + id, dport=20000))
+
+ def getIPv6Flow(self, id):
+ return (IPv6(dst="2001::%u" % (id), src="fd00:f00d:ffff::%u" % (id)) /
+ UDP(sport=10000 + id, dport=20000))
+
+ def generatePackets(self, src_if, isv4):
+ self.reset_packet_infos()
+ pkts = []
+ for pktid in self.packets:
+ info = self.create_packet_info(src_if, self.pg1)
+ payload = self.info_to_payload(info)
+ ip = self.getIPv4Flow(pktid) if isv4 else self.getIPv6Flow(pktid)
+ packet = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ ip /
+ Raw(payload))
+ self.extend_packet(packet, 128)
+ info.data = packet.copy()
+ pkts.append(packet)
+ return pkts
+
+ def checkInner(self, gre, isv4):
+ IPver = IP if isv4 else IPv6
+ self.assertEqual(gre.proto, 0x0800 if isv4 else 0x86DD)
+ self.assertEqual(gre.flags, 0)
+ self.assertEqual(gre.version, 0)
+ inner = IPver(scapy.compat.raw(gre.payload))
+ payload_info = self.payload_to_info(inner[Raw])
+ self.info = self.packet_infos[payload_info.index]
+ self.assertEqual(payload_info.src, self.pg0.sw_if_index)
+ self.assertEqual(scapy.compat.raw(inner),
+ scapy.compat.raw(self.info.data[IPver]))
+
+ def checkCapture(self, encap, isv4):
+ self.pg0.assert_nothing_captured()
+ out = self.pg1.get_capture(len(self.packets))
+
+ load = [0] * len(self.ass)
+ self.info = None
+ for p in out:
+ try:
+ asid = 0
+ gre = None
+ if (encap == 'gre4'):
+ ip = p[IP]
+ asid = int(ip.dst.split(".")[3])
+ self.assertEqual(ip.version, 4)
+ self.assertEqual(ip.flags, 0)
+ self.assertEqual(ip.src, "39.40.41.42")
+ self.assertEqual(ip.dst, "10.0.0.%u" % asid)
+ self.assertEqual(ip.proto, 47)
+ self.assertEqual(len(ip.options), 0)
+ gre = p[GRE]
+ self.checkInner(gre, isv4)
+ elif (encap == 'gre6'):
+ ip = p[IPv6]
+ asid = ip.dst.split(":")
+ asid = asid[len(asid) - 1]
+ asid = 0 if asid == "" else int(asid)
+ self.assertEqual(ip.version, 6)
+ self.assertEqual(ip.tc, 0)
+ self.assertEqual(ip.fl, 0)
+ self.assertEqual(ip.src, "2004::1")
+ self.assertEqual(
+ socket.inet_pton(socket.AF_INET6, ip.dst),
+ socket.inet_pton(socket.AF_INET6, "2002::%u" % asid)
+ )
+ self.assertEqual(ip.nh, 47)
+ # self.assertEqual(len(ip.options), 0)
+ gre = GRE(scapy.compat.raw(p[IPv6].payload))
+ self.checkInner(gre, isv4)
+ elif (encap == 'l3dsr'):
+ ip = p[IP]
+ asid = int(ip.dst.split(".")[3])
+ self.assertEqual(ip.version, 4)
+ self.assertEqual(ip.flags, 0)
+ self.assertEqual(ip.dst, "10.0.0.%u" % asid)
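+                    # dscp 7 occupies the top 6 bits of TOS: 7 << 2 == 0x1c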
+ self.assertEqual(ip.tos, 0x1c)
+ self.assertEqual(len(ip.options), 0)
+ self.assert_ip_checksum_valid(p)
+ if ip.proto == IP_PROTOS.tcp:
+ self.assert_tcp_checksum_valid(p)
+ elif ip.proto == IP_PROTOS.udp:
+ self.assert_udp_checksum_valid(p)
+ elif (encap == 'nat4'):
+ ip = p[IP]
+ asid = int(ip.dst.split(".")[3])
+ self.assertEqual(ip.version, 4)
+ self.assertEqual(ip.flags, 0)
+ self.assertEqual(ip.dst, "10.0.0.%u" % asid)
+ self.assertEqual(ip.proto, 17)
+ self.assertEqual(len(ip.options), 0)
+ udp = p[UDP]
+ self.assertEqual(udp.dport, 3307)
+ elif (encap == 'nat6'):
+ ip = p[IPv6]
+ asid = ip.dst.split(":")
+ asid = asid[len(asid) - 1]
+ asid = 0 if asid == "" else int(asid)
+ self.assertEqual(ip.version, 6)
+ self.assertEqual(ip.tc, 0)
+ self.assertEqual(ip.fl, 0)
+ self.assertEqual(
+ socket.inet_pton(socket.AF_INET6, ip.dst),
+ socket.inet_pton(socket.AF_INET6, "2002::%u" % asid)
+ )
+ self.assertEqual(ip.nh, 17)
+ self.assertGreaterEqual(ip.hlim, 63)
+ udp = UDP(scapy.compat.raw(p[IPv6].payload))
+ self.assertEqual(udp.dport, 3307)
+ load[asid] += 1
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # This is just to roughly check that the balancing algorithm
+ # is not completely biased.
+ for asid in self.ass:
+ if load[asid] < int(len(self.packets) / (len(self.ass) * 2)):
+ self.logger.error(
+ "ASS is not balanced: load[%d] = %d" % (asid, load[asid]))
+ raise Exception("Load Balancer algorithm is biased")
+
+ def test_lb_ip4_gre4(self):
+ """ Load Balancer IP4 GRE4 on vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='gre4', isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap gre4 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip6_gre4(self):
+ """ Load Balancer IP6 GRE4 on vip case """
+
+ try:
+ self.vapi.cli(
+ "lb vip 2001::/16 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre4', isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 2001::/16 encap gre4 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_gre6(self):
+ """ Load Balancer IP4 GRE6 on vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 2002::%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre6', isv4=True)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 2002::%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap gre6 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip6_gre6(self):
+ """ Load Balancer IP6 GRE6 on vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 2001::/16 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 2002::%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre6', isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 2002::%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 2001::/16 encap gre6 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_gre4_port(self):
+ """ Load Balancer IP4 GRE4 on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='gre4', isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap gre4 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip6_gre4_port(self):
+ """ Load Balancer IP6 GRE4 on per-port-vip case """
+
+ try:
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre4', isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap gre4 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_gre6_port(self):
+ """ Load Balancer IP4 GRE6 on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 2002::%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre6', isv4=True)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 2002::%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap gre6 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip6_gre6_port(self):
+ """ Load Balancer IP6 GRE6 on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 2002::%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(encap='gre6', isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 2002::%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap gre6 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_l3dsr(self):
+ """ Load Balancer IP4 L3DSR on vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap l3dsr dscp 7")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='l3dsr', isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 encap l3dsr"
+ " dscp 7 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_l3dsr_port(self):
+ """ Load Balancer IP4 L3DSR on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap l3dsr dscp 7")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='l3dsr', isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap l3dsr"
+ " dscp 7 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip4_nat4_port(self):
+ """ Load Balancer IP4 NAT4 on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap nat4"
+ " type clusterip target_port 3307")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='nat4', isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 90.0.0.0/8 protocol udp port 20000 10.0.0.%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 90.0.0.0/8 protocol udp port 20000 encap nat4"
+ " type clusterip target_port 3307 del")
+ self.vapi.cli("test lb flowtable flush")
+
+ def test_lb_ip6_nat6_port(self):
+ """ Load Balancer IP6 NAT6 on per-port-vip case """
+ try:
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap nat6"
+ " type clusterip target_port 3307")
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 2002::%u"
+ % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(encap='nat6', isv4=False)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli(
+ "lb as 2001::/16 protocol udp port 20000 2002::%u del"
+ % (asid))
+ self.vapi.cli(
+ "lb vip 2001::/16 protocol udp port 20000 encap nat6"
+ " type clusterip target_port 3307 del")
+ self.vapi.cli("test lb flowtable flush")
diff --git a/test/test_lb_api.py b/test/test_lb_api.py
new file mode 100644
index 00000000000..70d41d432a7
--- /dev/null
+++ b/test/test_lb_api.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2019. Vinci Consulting Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import framework
+import ipaddress
+
+DEFAULT_VIP = "lb_vip_details(_0=978, context=12, vip=vl_api_lb_ip_addr_t(pfx=IPv6Network(u'::/0'), protocol=<vl_api_ip_proto_t.IP_API_PROTO_RESERVED: 255>, port=0), encap=<vl_api_lb_encap_type_t.LB_API_ENCAP_TYPE_GRE4: 0>, dscp=<vl_api_ip_dscp_t.IP_API_DSCP_CS0: 0>, srv_type=<vl_api_lb_srv_type_t.LB_API_SRV_TYPE_CLUSTERIP: 0>, target_port=0, flow_table_length=0)" # noqa
+
+
+class TestLbEmptyApi(framework.VppTestCase):
+ """TestLbEmptyApi """
+
+ def test_lb_empty_vip_dump(self):
+
+ # no records should normally return [], but
+ # lb initializes with a default VIP
+ rv = self.vapi.lb_vip_dump()
+ # print(rv)
+ self.assertEqual(rv, [], 'Expected: [] Received: %r.' % rv)
+
+ def test_lb_empty_as_dump(self):
+
+ # no records should return []
+ rv = self.vapi.lb_as_dump()
+ # print(rv)
+ self.assertEqual(rv, [], 'Expected: [] Received: %r.' % rv)
+
+
+class TestLbApi(framework.VppTestCase):
+ """TestLbApi """
+
+ def test_lb_vip_dump(self):
+ # add some vips
+ # rv = self.vapi.lb_add_del_vip(pfx=ipaddress.IPv4Network(u'1.2.3.0/24'), # noqa
+ # protocol=17,
+ # encap=0)
+ # print(rv)
+ self.vapi.cli("lb vip 2001::/16 encap gre6")
+ rv = self.vapi.lb_vip_dump()
+ # print(rv)
+ self.assertEqual(str(rv[-1].vip.pfx), "2001::/16",
+ 'Expected: 2001::/16 Received: %r.' % rv[-1].vip.pfx)
+
+ self.vapi.cli("lb vip 2001::/16 del")
+
+
+class TestLbAsApi(framework.VppTestCase):
+ """TestLbAsApi """
+
+ def test_lb_as_dump(self):
+ # add some vips
+ self.vapi.cli("lb vip 2001::/16 encap gre6")
+ self.vapi.cli("lb as 2001::/16 2000::1")
+ # add some as's for the vips
+ # rv = self.vapi.lb_add_del_as(
+ # pfx=ipaddress.IPv4Network(u"10.0.0.0/24"),
+ # as_address=ipaddress.IPv4Address(u"192.168.1.1"))
+
+ # print(rv)
+ rv = self.vapi.lb_as_dump()
+ # print(rv)
+ self.assertEqual(str(rv[0].vip.pfx), "2001::/16",
+ 'Expected: "2001::/16" Received: %r.' % rv[0].vip.pfx)
+ self.assertEqual(str(rv[0].app_srv), "2000::1",
+ 'Expected: "2000::1" Received: %r.' % rv[0].app_srv)
diff --git a/test/test_linux_cp.py b/test/test_linux_cp.py
new file mode 100644
index 00000000000..df38681b16e
--- /dev/null
+++ b/test/test_linux_cp.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6, Raw
+from scapy.layers.l2 import Ether, ARP, Dot1Q
+
+from vpp_object import VppObject
+from framework import VppTestCase, VppTestRunner
+
+
+class VppLcpPair(VppObject):
+ def __init__(self, test, phy, host):
+ self._test = test
+ self.phy = phy
+ self.host = host
+
+ def add_vpp_config(self):
+ self._test.vapi.cli("test lcp add phy %s host %s" %
+ (self.phy, self.host))
+ self._test.registry.register(self, self._test.logger)
+ return self
+
+ def remove_vpp_config(self):
+ self._test.vapi.cli("test lcp del phy %s host %s" %
+ (self.phy, self.host))
+
+ def object_id(self):
+ return "lcp:%d:%d" % (self.phy.sw_if_index,
+ self.host.sw_if_index)
+
+ def query_vpp_config(self):
+ pairs = list(self._test.vapi.vpp.details_iter(
+ self._test.vapi.lcp_itf_pair_get))
+
+ for p in pairs:
+ if p.phy_sw_if_index == self.phy.sw_if_index and \
+ p.host_sw_if_index == self.host.sw_if_index:
+ return True
+ return False
+
+
+class TestLinuxCP(VppTestCase):
+ """ Linux Control Plane """
+
+ extra_vpp_plugin_config = ["plugin",
+ "linux_cp_plugin.so",
+ "{", "enable", "}",
+ "plugin",
+ "linux_cp_unittest_plugin.so",
+ "{", "enable", "}"]
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestLinuxCP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestLinuxCP, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestLinuxCP, self).setUp()
+
+ # create 4 pg interfaces so there are a few addresses
+ # in the FIB
+ self.create_pg_interfaces(range(4))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.admin_down()
+ super(TestLinuxCP, self).tearDown()
+
+ def test_linux_cp_tap(self):
+ """ Linux CP TAP """
+
+ #
+ # Setup
+ #
+
+ arp_opts = {"who-has": 1, "is-at": 2}
+
+        # create two pairs, with a bunch of hosts on the phys
+ hosts = [self.pg0, self.pg1]
+ phys = [self.pg2, self.pg3]
+ N_HOSTS = 4
+
+ for phy in phys:
+ phy.config_ip4()
+ phy.generate_remote_hosts(4)
+ phy.configure_ipv4_neighbors()
+
+ pair1 = VppLcpPair(self, phys[0], hosts[0]).add_vpp_config()
+ pair2 = VppLcpPair(self, phys[1], hosts[1]).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh lcp adj verbose"))
+ self.logger.info(self.vapi.cli("sh lcp"))
+
+ #
+ # Traffic Tests
+ #
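+        # with the lcp pairs in place, traffic injected on a host
+        # interface should appear unmodified on its paired phy, and
+        # vice versa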
+
+ # hosts to phys
+ for phy, host in zip(phys, hosts):
+ for j in range(N_HOSTS):
+ p = (Ether(src=phy.local_mac,
+ dst=phy.remote_hosts[j].mac) /
+ IP(src=phy.local_ip4,
+ dst=phy.remote_hosts[j].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ rxs = self.send_and_expect(host, [p], phy)
+
+ # verify packet is unchanged
+ for rx in rxs:
+ self.assertEqual(p.show2(True), rx.show2(True))
+
+ # ARPs x-connect to phy
+ p = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=phy.remote_hosts[j].mac) /
+ ARP(op="who-has",
+ hwdst=phy.remote_hosts[j].mac,
+ hwsrc=phy.local_mac,
+ psrc=phy.local_ip4,
+ pdst=phy.remote_hosts[j].ip4))
+
+ rxs = self.send_and_expect(host, [p], phy)
+
+ # verify packet is unchanged
+ for rx in rxs:
+ self.assertEqual(p.show2(True), rx.show2(True))
+
+ # phy to host
+ for phy, host in zip(phys, hosts):
+ for j in range(N_HOSTS):
+ p = (Ether(dst=phy.local_mac,
+ src=phy.remote_hosts[j].mac) /
+ IP(dst=phy.local_ip4,
+ src=phy.remote_hosts[j].ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw())
+
+ rxs = self.send_and_expect(phy, [p], host)
+
+ # verify packet is unchanged
+ for rx in rxs:
+ self.assertEqual(p.show2(True), rx.show2(True))
+
+ # ARPs rx'd on the phy are sent to the host
+ p = (Ether(dst="ff:ff:ff:ff:ff:ff",
+ src=phy.remote_hosts[j].mac) /
+ ARP(op="is-at",
+ hwsrc=phy.remote_hosts[j].mac,
+ hwdst=phy.local_mac,
+ pdst=phy.local_ip4,
+ psrc=phy.remote_hosts[j].ip4))
+
+ rxs = self.send_and_expect(phy, [p], host)
+
+ # verify packet is unchanged
+ for rx in rxs:
+ self.assertEqual(p.show2(True), rx.show2(True))
+
+ # cleanup
+ for phy in phys:
+ phy.unconfig_ip4()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_lisp.py b/test/test_lisp.py
new file mode 100644
index 00000000000..0a6e7525159
--- /dev/null
+++ b/test/test_lisp.py
@@ -0,0 +1,219 @@
+#!/usr/bin/env python3
+
+import abc
+import unittest
+
+from scapy.fields import BitField, ByteField, FlagsField, IntField
+from scapy.packet import bind_layers, Packet, Raw
+from scapy.layers.inet import IP, UDP, Ether
+from scapy.layers.inet6 import IPv6
+
+from framework import VppTestCase, VppTestRunner
+from lisp import VppLocalMapping, VppLispAdjacency, VppLispLocator, \
+ VppLispLocatorSet, VppRemoteMapping, LispRemoteLocator
+from util import ppp
+
+# From py_lispnetworking.lisp.py: # GNU General Public License v2.0
+
+
+class LISP_GPE_Header(Packet):
+ name = "LISP GPE Header"
+ fields_desc = [
+ FlagsField("gpe_flags", None, 6, ["N", "L", "E", "V", "I", "P"]),
+ BitField("reserved", 0, 18),
+ ByteField("next_proto", 0),
+ IntField("iid", 0),
+ ]
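+# LISP-GPE next-protocol values: 1 = IPv4, 2 = IPv6, 3 = Ethernet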
+bind_layers(UDP, LISP_GPE_Header, dport=4341)
+bind_layers(UDP, LISP_GPE_Header, sport=4341)
+bind_layers(LISP_GPE_Header, IP, next_proto=1)
+bind_layers(LISP_GPE_Header, IPv6, next_proto=2)
+bind_layers(LISP_GPE_Header, Ether, next_proto=3)
+
+
+class ForeignAddressFactory(object):
+ count = 0
+ prefix_len = 24
+ net_template = '10.10.10.{}'
+ net = net_template.format(0) + '/' + str(prefix_len)
+
+ def get_ip4(self):
+ if self.count > 255:
+ raise Exception("Network host address exhaustion")
+ self.count += 1
+ return self.net_template.format(self.count)
+
+
+class Driver(metaclass=abc.ABCMeta):
+    """ Basic class for data-driven testing """
+
+    config_order = ['locator-sets',
+                    'locators',
+                    'local-mappings',
+                    'remote-mappings',
+                    'adjacencies']
+
+ def __init__(self, test, test_cases):
+ self._test_cases = test_cases
+ self._test = test
+
+ @property
+ def test_cases(self):
+ return self._test_cases
+
+ @property
+ def test(self):
+ return self._test
+
+    def create_packet(self, src_if, dst_if, deid, payload=''):
+        """
+        Create an IPv4 packet
+
+        :param src_if: source interface
+        :param dst_if: destination interface
+        :param deid: destination EID (IPv4 address)
+        :param payload: raw payload
+        """
+ packet = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=deid) /
+ Raw(payload))
+ return packet
+
+ @abc.abstractmethod
+ def run(self):
+ """ testing procedure """
+ pass
+
+
+class SimpleDriver(Driver):
+ """ Implements simple test procedure """
+ def __init__(self, test, test_cases):
+ super(SimpleDriver, self).__init__(test, test_cases)
+
+ def verify_capture(self, src_loc, dst_loc, capture):
+ """
+ Verify captured packet
+
+ :param src_loc: source locator address
+ :param dst_loc: destination locator address
+ :param capture: list of captured packets
+ """
+ self.test.assertEqual(len(capture), 1, "Unexpected number of "
+ "packets! Expected 1 but {} received"
+ .format(len(capture)))
+ packet = capture[0]
+ try:
+ ip_hdr = packet[IP]
+ # assert the values match
+ self.test.assertEqual(ip_hdr.src, src_loc, "IP source address")
+ self.test.assertEqual(ip_hdr.dst, dst_loc,
+ "IP destination address")
+ gpe_hdr = packet[LISP_GPE_Header]
+ self.test.assertEqual(gpe_hdr.next_proto, 1,
+ "next_proto is not ipv4!")
+ ih = gpe_hdr[IP]
+ self.test.assertEqual(ih.src, self.test.pg0.remote_ip4,
+ "unexpected source EID!")
+ self.test.assertEqual(ih.dst, self.test.deid_ip4,
+ "unexpected dest EID!")
+ except:
+ self.test.logger.error(ppp("Unexpected or invalid packet:",
+ packet))
+ raise
+
+ def configure_tc(self, tc):
+ for config_item in self.config_order:
+ for vpp_object in tc[config_item]:
+ vpp_object.add_vpp_config()
+
+ def run(self, dest):
+ """ Send traffic for each test case and verify that it
+ is encapsulated """
+        for tc in self.test_cases:
+            self.test.logger.info('Running {}'.format(tc['name']))
+            self.configure_tc(tc)
+
+ packet = self.create_packet(self.test.pg0, self.test.pg1, dest,
+ 'data')
+ self.test.pg0.add_stream(packet)
+ self.test.pg0.enable_capture()
+ self.test.pg1.enable_capture()
+ self.test.pg_start()
+ capture = self.test.pg1.get_capture(1)
+ self.verify_capture(self.test.pg1.local_ip4,
+ self.test.pg1.remote_ip4, capture)
+ self.test.pg0.assert_nothing_captured()
+
+
+class TestLisp(VppTestCase):
+ """ Basic LISP test """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestLisp, cls).setUpClass()
+ cls.faf = ForeignAddressFactory()
+ cls.create_pg_interfaces(range(2)) # create pg0 and pg1
+ for i in cls.pg_interfaces:
+            i.admin_up()  # put the interface up
+ i.config_ip4() # configure IPv4 address on the interface
+ i.resolve_arp() # resolve ARP, so that we know VPP MAC
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestLisp, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestLisp, self).setUp()
+ self.vapi.lisp_enable_disable(is_enable=1)
+
+ def test_lisp_basic_encap(self):
+ """Test case for basic encapsulation"""
+
+ self.deid_ip4_net = self.faf.net
+ self.deid_ip4 = self.faf.get_ip4()
+ self.seid_ip4 = '{!s}/{!s}'.format(self.pg0.local_ip4, 32)
+ self.rloc_ip4 = self.pg1.remote_ip4
+
+ test_cases = [
+ {
+ 'name': 'basic ip4 over ip4',
+ 'locator-sets': [VppLispLocatorSet(self, 'ls-4o4')],
+ 'locators': [
+ VppLispLocator(self, self.pg1.sw_if_index, 'ls-4o4')
+ ],
+ 'local-mappings': [
+ VppLocalMapping(self, self.seid_ip4, 'ls-4o4')
+ ],
+ 'remote-mappings': [
+ VppRemoteMapping(self, self.deid_ip4_net,
+ [LispRemoteLocator(self.rloc_ip4)])
+ ],
+ 'adjacencies': [
+ VppLispAdjacency(self, self.seid_ip4, self.deid_ip4_net)
+ ]
+ }
+ ]
+ self.test_driver = SimpleDriver(self, test_cases)
+ self.test_driver.run(self.deid_ip4)
+
+
+class TestLispUT(VppTestCase):
+ """ Lisp UT """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestLispUT, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestLispUT, cls).tearDownClass()
+
+ def test_fib(self):
+ """ LISP Unit Tests """
+ error = self.vapi.cli("test lisp cp")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("Failed", error)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_mactime.py b/test/test_mactime.py
new file mode 100644
index 00000000000..85ded33d158
--- /dev/null
+++ b/test/test_mactime.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_gcov_tests
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+
+
+class TestMactime(VppTestCase):
+ """ Mactime Unit Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMactime, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMactime, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestMactime, self).setUp()
+
+ def tearDown(self):
+ super(TestMactime, self).tearDown()
+
+ def test_mactime_range_unittest(self):
+ """ Time Range Test """
+ error = self.vapi.cli("test time-range")
+
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn('FAILED', error)
+
+ @unittest.skipUnless(running_gcov_tests, "part of code coverage tests")
+ def test_mactime_unittest(self):
+ """ Mactime Plugin Code Coverage Test """
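+        # the commands below exercise both the CLI and the binary API
+        # ("bin ...") paths, including deliberately bogus invocations,
+        # purely for code coverage; failures are logged, not asserted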
+ cmds = ["loopback create",
+ "mactime enable-disable disable",
+ "mactime enable-disable loop0",
+ "mactime enable-disable loop0 disable",
+ "mactime enable-disable sw_if_index 9999",
+ "bin mactime_enable_disable loop0",
+ "bin mactime_enable_disable loop0 disable",
+ "bin mactime_enable_disable sw_if_index 1",
+ "set interface state loop0 up",
+ "clear mactime",
+ "set ip neighbor loop0 192.168.1.1 00:d0:2d:5e:86:85",
+ "bin mactime_add_del_range name sallow "
+ "mac 00:d0:2d:5e:86:85 allow-static del",
+ "bin mactime_add_del_range name sallow "
+ "mac 00:d0:2d:5e:86:85 allow-static",
+ "bin mactime_add_del_range name sallow "
+ "mac 00:d0:2d:5e:86:85 allow-static del",
+ "bin mactime_add_del_range name sallow "
+ "mac 00:d0:2d:5e:86:85 allow-static",
+ "bin mactime_add_del_range name sblock "
+ "mac 01:00:5e:7f:ff:fa drop-static",
+ "bin mactime_add_del_range name ddrop "
+ "mac c8:bc:c8:5a:ba:f3 drop-range Sun - Sat "
+ "00:00 - 23:59",
+ "bin mactime_add_del_range name dallow "
+ "mac c8:bc:c8:5a:ba:f4 allow-range Sun - Sat "
+ "00:00 - 23:59",
+ "bin mactime_add_del_range name multi "
+ "mac c8:bc:c8:f0:f0:f0 allow-range Sun - Mon "
+ "00:00 - 23:59 Tue - Sat 00:00 - 23:59",
+ "bin mactime_add_del_range bogus",
+ "bin mactime_add_del_range mac 01:00:5e:7f:f0:f0 allow-static",
+ "bin mactime_add_del_range "
+ "name tooloooooooooooooooooooooooooooooooooooooooooooooooo"
+ "nnnnnnnnnnnnnnnnnnnnnnnnnnnng mac 00:00:de:ad:be:ef "
+ "allow-static",
+ "packet-generator new {\n"
+ " name allow\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node ethernet-input\n"
+ " data {\n"
+ " IP6: 00:d0:2d:5e:86:85 -> 00:0d:ea:d0:00:00\n"
+ " ICMP: db00::1 -> db00::2\n"
+ " incrementing 30\n"
+ " }\n",
+ "}\n",
+ "packet-generator new {\n"
+ " name deny\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node ethernet-input\n"
+ " data {\n"
+ " IP6: 01:00:5e:7f:ff:fa -> 00:0d:ea:d0:00:00\n"
+ " ICMP: db00::1 -> db00::2\n"
+ " incrementing 30\n"
+ " }\n",
+ "}\n",
+ "packet-generator new {\n"
+ " name ddrop\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node ethernet-input\n"
+ " data {\n"
+ " IP6: c8:bc:c8:5a:ba:f3 -> 00:0d:ea:d0:00:00\n"
+ " ICMP: db00::1 -> db00::2\n"
+ " incrementing 30\n"
+ " }\n",
+ "}\n",
+ "packet-generator new {\n"
+ " name dallow\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node ethernet-input\n"
+ " data {\n"
+ " IP6: c8:bc:c8:5a:ba:f4 -> 00:0d:ea:d0:00:00\n"
+ " ICMP: db00::1 -> db00::2\n"
+ " incrementing 30\n"
+ " }\n"
+ "}\n"
+ "packet-generator new {\n"
+ " name makeentry\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface loop0\n"
+ " node ethernet-input\n"
+ " data {\n"
+ " IP6: c8:bc:c8:5a:b0:0b -> 00:0d:ea:d0:00:00\n"
+ " ICMP: db00::1 -> db00::2\n"
+ " incrementing 30\n"
+ " }\n"
+ "}\n"
+ "packet-generator new {\n"
+ " name tx\n"
+ " limit 15\n"
+ " size 128-128\n"
+ " interface local0\n"
+ " tx-interface loop0\n"
+ " node loop0-output\n"
+ " data {\n"
+ " hex 0x01005e7ffffa000dead000000800"
+ "0102030405060708090a0b0c0d0e0f0102030405\n"
+ " }\n"
+ "}\n"
+ "trace add pg-input 2",
+ "pa en",
+ "show mactime verbose 2",
+ "show trace",
+ "show error"]
+
+ for cmd in cmds:
+ r = self.vapi.cli_return_response(cmd)
+ if r.retval != 0:
+ if hasattr(r, 'reply'):
+ self.logger.info(cmd + " FAIL reply " + r.reply)
+ else:
+ self.logger.info(cmd + " FAIL retval " + str(r.retval))
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_map.py b/test/test_map.py
new file mode 100644
index 00000000000..90fee301267
--- /dev/null
+++ b/test/test_map.py
@@ -0,0 +1,964 @@
+#!/usr/bin/env python3
+
+import ipaddress
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from util import fragment_rfc791, fragment_rfc8200
+
+import scapy.compat
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP, ICMP, TCP
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded, IPv6ExtHdrFragment, \
+ ICMPv6EchoRequest, ICMPv6DestUnreach
+
+
+class TestMAP(VppTestCase):
+ """ MAP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMAP, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMAP, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestMAP, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(4))
+
+ # pg0 is 'inside' IPv4
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg0.generate_remote_hosts(2)
+ self.pg0.configure_ipv4_neighbors()
+
+ # pg1 is 'outside' IPv6
+ self.pg1.admin_up()
+ self.pg1.config_ip6()
+ self.pg1.generate_remote_hosts(4)
+ self.pg1.configure_ipv6_neighbors()
+
+ def tearDown(self):
+ super(TestMAP, self).tearDown()
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+
+ def send_and_assert_encapped(self, packets, ip6_src, ip6_dst, dmac=None):
+ if not dmac:
+ dmac = self.pg1.remote_mac
+
+ self.pg0.add_stream(packets)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ capture = self.pg1.get_capture(len(packets))
+ for rx, tx in zip(capture, packets):
+ self.assertEqual(rx[Ether].dst, dmac)
+ self.assertEqual(rx[IP].src, tx[IP].src)
+ self.assertEqual(rx[IPv6].src, ip6_src)
+ self.assertEqual(rx[IPv6].dst, ip6_dst)
+
+ def send_and_assert_encapped_one(self, packet, ip6_src, ip6_dst,
+ dmac=None):
+ return self.send_and_assert_encapped([packet], ip6_src, ip6_dst, dmac)
+
+ def test_api_map_domain_dump(self):
+ map_dst = '2001::/64'
+ map_src = '3000::1/128'
+ client_pfx = '192.168.0.0/16'
+ tag = 'MAP-E tag.'
+ index = self.vapi.map_add_domain(ip4_prefix=client_pfx,
+ ip6_prefix=map_dst,
+ ip6_src=map_src,
+ tag=tag).index
+ rv = self.vapi.map_domain_dump()
+
+ # restore the state early so as to not impact subsequent tests.
+ # If an assert fails, we will not get the chance to do it at the end.
+ self.vapi.map_del_domain(index=index)
+
+ self.assertGreater(len(rv), 0,
+ "Expected output from 'map_domain_dump'")
+
+ # typedefs are returned as ipaddress objects.
+ # wrap results in str() ugh! to avoid the need to call unicode.
+ self.assertEqual(str(rv[0].ip4_prefix), client_pfx)
+ self.assertEqual(str(rv[0].ip6_prefix), map_dst)
+ self.assertEqual(str(rv[0].ip6_src), map_src)
+
+ self.assertEqual(rv[0].tag, tag,
+ "output produced incorrect tag value.")
+
+ def create_domains(self, ip4_pfx_str, ip6_pfx_str, ip6_src_str):
+ ip4_pfx = ipaddress.ip_network(ip4_pfx_str)
+ ip6_dst = ipaddress.ip_network(ip6_pfx_str)
+ mod = ip4_pfx.num_addresses / 1024
+        indices = []
+        for i in range(ip4_pfx.num_addresses):
+            rv = self.vapi.map_add_domain(ip6_prefix=ip6_pfx_str,
+                                          ip4_prefix=str(ip4_pfx[i]) + "/32",
+                                          ip6_src=ip6_src_str)
+            indices.append(rv.index)
+        return indices
+
+ def test_api_map_domains_get(self):
+ # Create a bunch of domains
+ no_domains = 4096 # This must be large enough to ensure VPP suspends
+ domains = self.create_domains('130.67.0.0/20', '2001::/32',
+ '2001::1/128')
+ self.assertEqual(len(domains), no_domains)
+
+ d = []
+ cursor = 0
+
+ # Invalid cursor
+ rv, details = self.vapi.map_domains_get(cursor=no_domains+10)
+ self.assertEqual(rv.retval, -7)
+
+ # Delete a domain in the middle of walk
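+        # (retval -165 signals that the dump did not complete in one call;
+        #  rv.cursor indicates where to resume)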
+ rv, details = self.vapi.map_domains_get(cursor=0)
+ self.assertEqual(rv.retval, -165)
+ self.vapi.map_del_domain(index=rv.cursor)
+ domains.remove(rv.cursor)
+
+ # Continue at point of deleted cursor
+ rv, details = self.vapi.map_domains_get(cursor=rv.cursor)
+ self.assertIn(rv.retval, [0, -165])
+
+ d = list(self.vapi.vpp.details_iter(self.vapi.map_domains_get))
+ self.assertEqual(len(d), no_domains - 1)
+
+ # Clean up
+ for i in domains:
+ self.vapi.map_del_domain(index=i)
+
+ def test_map_e_udp(self):
+ """ MAP-E UDP"""
+
+ #
+ # Add a route to the MAP-BR
+ #
+ map_br_pfx = "2001::"
+ map_br_pfx_len = 32
+ map_route = VppIpRoute(self,
+ map_br_pfx,
+ map_br_pfx_len,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ map_route.add_vpp_config()
+
+ #
+ # Add a domain that maps from pg0 to pg1
+ #
+ map_dst = '2001::/32'
+ map_src = '3000::1/128'
+ client_pfx = '192.168.0.0/16'
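+        # expected CE address, following the MAP-E format (RFC 7597) with
+        # the rule below: EA bits = IPv4 suffix 1.1 plus PSID 7 (bits 4..8
+        # of dest port 10000); the interface-id embeds the full IPv4
+        # address and the PSID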
+ map_translated_addr = '2001:0:101:7000:0:c0a8:101:7'
+ tag = 'MAP-E tag.'
+ self.vapi.map_add_domain(ip4_prefix=client_pfx,
+ ip6_prefix=map_dst,
+ ip6_src=map_src,
+ ea_bits_len=20,
+ psid_offset=4,
+ psid_length=4,
+ tag=tag)
+
+ self.vapi.map_param_set_security_check(enable=1, fragments=1)
+
+ # Enable MAP on interface.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=0)
+
+ # Ensure MAP doesn't steal all packets!
+ v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, v4 * 4, self.pg0)
+ v4_reply = v4[1]
+ v4_reply.ttl -= 1
+ for p in rx:
+ self.validate(p[1], v4_reply)
+
+ #
+ # Fire in a v4 packet that will be encapped to the BR
+ #
+ v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='192.168.1.1') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+
+ self.send_and_assert_encapped(v4 * 4, "3000::1", map_translated_addr)
+
+ #
+ # Verify reordered fragments are able to pass as well
+ #
+ v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(id=1, src=self.pg0.remote_ip4, dst='192.168.1.1') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1000))
+
+ frags = fragment_rfc791(v4, 400)
+ frags.reverse()
+
+ self.send_and_assert_encapped(frags, "3000::1", map_translated_addr)
+
+ # Enable MAP on interface.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=0)
+
+ # Ensure MAP doesn't steal all packets
+ v6 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.pg1.remote_ip6) /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+ rx = self.send_and_expect(self.pg1, v6*1, self.pg1)
+ v6_reply = v6[1]
+ v6_reply.hlim -= 1
+ for p in rx:
+ self.validate(p[1], v6_reply)
+
+ #
+ # Fire in a V6 encapped packet.
+ # expect a decapped packet on the inside ip4 link
+ #
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst='3000::1', src=map_translated_addr) /
+ IP(dst=self.pg0.remote_ip4, src='192.168.1.1') /
+ UDP(sport=10000, dport=20000) /
+ Raw(b'\xa5' * 100))
+
+ self.pg1.add_stream(p)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(1)
+ rx = rx[0]
+
+ self.assertFalse(rx.haslayer(IPv6))
+ self.assertEqual(rx[IP].src, p[IP].src)
+ self.assertEqual(rx[IP].dst, p[IP].dst)
+
+ #
+ # Verify encapped reordered fragments pass as well
+ #
+ p = (IP(id=1, dst=self.pg0.remote_ip4, src='192.168.1.1') /
+ UDP(sport=10000, dport=20000) /
+ Raw(b'\xa5' * 1500))
+ frags = fragment_rfc791(p, 400)
+ frags.reverse()
+
+ stream = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(dst='3000::1', src=map_translated_addr) /
+ x for x in frags)
+
+ self.pg1.add_stream(stream)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(len(frags))
+
+ for r in rx:
+ self.assertFalse(r.haslayer(IPv6))
+ self.assertEqual(r[IP].src, p[IP].src)
+ self.assertEqual(r[IP].dst, p[IP].dst)
+
+ # Verify that fragments pass even if ipv6 layer is fragmented
+ stream = (IPv6(dst='3000::1', src=map_translated_addr) / x
+ for x in frags)
+
+ v6_stream = [
+ Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) / x
+ for i in range(len(frags))
+ for x in fragment_rfc8200(
+ IPv6(dst='3000::1', src=map_translated_addr) / frags[i],
+ i, 200)]
+
+ self.pg1.add_stream(v6_stream)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture(len(frags))
+
+ for r in rx:
+ self.assertFalse(r.haslayer(IPv6))
+ self.assertEqual(r[IP].src, p[IP].src)
+ self.assertEqual(r[IP].dst, p[IP].dst)
+
+ #
+ # Pre-resolve. No API for this!!
+ #
+ self.vapi.ppcli("map params pre-resolve ip6-nh 4001::1")
+
+ self.send_and_assert_no_replies(self.pg0, v4,
+ "resolved via default route")
+
+ #
+ # Add a route to 4001::1. Expect the encapped traffic to be
+        # sent via that route's next-hop
+ #
+ pre_res_route = VppIpRoute(self, "4001::1", 128,
+ [VppRoutePath(self.pg1.remote_hosts[2].ip6,
+ self.pg1.sw_if_index)])
+ pre_res_route.add_vpp_config()
+
+ self.send_and_assert_encapped_one(v4, "3000::1",
+ map_translated_addr,
+ dmac=self.pg1.remote_hosts[2].mac)
+
+ #
+        # change the route to the pre-resolved next-hop
+ #
+ pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6,
+ self.pg1.sw_if_index)])
+ pre_res_route.add_vpp_config()
+
+ self.send_and_assert_encapped_one(v4, "3000::1",
+ map_translated_addr,
+ dmac=self.pg1.remote_hosts[3].mac)
+
+ #
+ # cleanup. The test infra's object registry will ensure
+ # the route is really gone and thus that the unresolve worked.
+ #
+ pre_res_route.remove_vpp_config()
+ self.vapi.ppcli("map params pre-resolve del ip6-nh 4001::1")
+
+ def test_map_e_inner_frag(self):
+ """ MAP-E Inner fragmentation """
+
+ #
+ # Add a route to the MAP-BR
+ #
+ map_br_pfx = "2001::"
+ map_br_pfx_len = 32
+ map_route = VppIpRoute(self,
+ map_br_pfx,
+ map_br_pfx_len,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ map_route.add_vpp_config()
+
+ #
+ # Add a domain that maps from pg0 to pg1
+ #
+ map_dst = '2001::/32'
+ map_src = '3000::1/128'
+ client_pfx = '192.168.0.0/16'
+ map_translated_addr = '2001:0:101:7000:0:c0a8:101:7'
+ tag = 'MAP-E tag.'
+ self.vapi.map_add_domain(ip4_prefix=client_pfx,
+ ip6_prefix=map_dst,
+ ip6_src=map_src,
+ ea_bits_len=20,
+ psid_offset=4,
+ psid_length=4,
+ mtu=1000,
+ tag=tag)
+
+ # Enable MAP on interface.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=0)
+
+ # Enable inner fragmentation
+ self.vapi.map_param_set_fragmentation(inner=1)
+
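+        # with the domain mtu set to 1000, the ~1300 byte inner packet
+        # should be fragmented before encapsulation, so two encapsulated
+        # fragments are expected on pg1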
+ v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst='192.168.1.1') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1300))
+
+ self.pg_send(self.pg0, v4*1)
+ rx = self.pg1.get_capture(2)
+
+ frags = fragment_rfc791(v4[1], 1000)
+ frags[0].id = 0
+ frags[1].id = 0
+ frags[0].ttl -= 1
+ frags[1].ttl -= 1
+ frags[0].chksum = 0
+ frags[1].chksum = 0
+
+ v6_reply1 = (IPv6(src='3000::1', dst=map_translated_addr, hlim=63) /
+ frags[0])
+ v6_reply2 = (IPv6(src='3000::1', dst=map_translated_addr, hlim=63) /
+ frags[1])
+ rx[0][1].fl = 0
+ rx[1][1].fl = 0
+ rx[0][1][IP].id = 0
+ rx[1][1][IP].id = 0
+ rx[0][1][IP].chksum = 0
+ rx[1][1][IP].chksum = 0
+
+ self.validate(rx[0][1], v6_reply1)
+ self.validate(rx[1][1], v6_reply2)
+
+ def test_map_e_tcp_mss(self):
+ """ MAP-E TCP MSS"""
+
+ #
+ # Add a route to the MAP-BR
+ #
+ map_br_pfx = "2001::"
+ map_br_pfx_len = 32
+ map_route = VppIpRoute(self,
+ map_br_pfx,
+ map_br_pfx_len,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ map_route.add_vpp_config()
+
+ #
+ # Add a domain that maps from pg0 to pg1
+ #
+ map_dst = '2001::/32'
+ map_src = '3000::1/128'
+ client_pfx = '192.168.0.0/16'
+ map_translated_addr = '2001:0:101:5000:0:c0a8:101:5'
+ tag = 'MAP-E TCP tag.'
+ self.vapi.map_add_domain(ip4_prefix=client_pfx,
+ ip6_prefix=map_dst,
+ ip6_src=map_src,
+ ea_bits_len=20,
+ psid_offset=4,
+ psid_length=4,
+ tag=tag)
+
+ # Enable MAP on pg0 interface.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=0)
+
+ # Enable MAP on pg1 interface.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=0)
+
+ # TCP MSS clamping
+ mss_clamp = 1300
+ self.vapi.map_param_set_tcp(mss_clamp)
+
+ #
+ # Send a v4 packet that will be encapped.
+ #
+ p_ether = Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+ p_ip4 = IP(src=self.pg0.remote_ip4, dst='192.168.1.1')
+ p_tcp = TCP(sport=20000, dport=30000, flags="S",
+ options=[("MSS", 1455)])
+ p4 = p_ether / p_ip4 / p_tcp
+
+ self.pg1.add_stream(p4)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture(1)
+ rx = rx[0]
+
+ self.assertTrue(rx.haslayer(IPv6))
+ self.assertEqual(rx[IP].src, p4[IP].src)
+ self.assertEqual(rx[IP].dst, p4[IP].dst)
+ self.assertEqual(rx[IPv6].src, "3000::1")
+ self.assertEqual(rx[TCP].options,
+ TCP(options=[('MSS', mss_clamp)]).options)
+
+ def validate(self, rx, expected):
+ self.assertEqual(rx, expected.__class__(scapy.compat.raw(expected)))
+
+ def validate_frag6(self, p6_frag, p_ip6_expected):
+ self.assertFalse(p6_frag.haslayer(IP))
+ self.assertTrue(p6_frag.haslayer(IPv6))
+ self.assertTrue(p6_frag.haslayer(IPv6ExtHdrFragment))
+ self.assertEqual(p6_frag[IPv6].src, p_ip6_expected.src)
+ self.assertEqual(p6_frag[IPv6].dst, p_ip6_expected.dst)
+
+ def validate_frag_payload_len6(self, rx, proto, payload_len_expected):
+ payload_total = 0
+ for p in rx:
+ payload_total += p[IPv6].plen
+
+ # First fragment has proto
+ payload_total -= len(proto())
+
+ # Every fragment has IPv6 fragment header
+ payload_total -= len(IPv6ExtHdrFragment()) * len(rx)
+
+ self.assertEqual(payload_total, payload_len_expected)
+
+ def validate_frag4(self, p4_frag, p_ip4_expected):
+ self.assertFalse(p4_frag.haslayer(IPv6))
+ self.assertTrue(p4_frag.haslayer(IP))
+ self.assertTrue(p4_frag[IP].frag != 0 or p4_frag[IP].flags.MF)
+ self.assertEqual(p4_frag[IP].src, p_ip4_expected.src)
+ self.assertEqual(p4_frag[IP].dst, p_ip4_expected.dst)
+
+ def validate_frag_payload_len4(self, rx, proto, payload_len_expected):
+ payload_total = 0
+ for p in rx:
+ payload_total += len(p[IP].payload)
+
+ # First fragment has proto
+ payload_total -= len(proto())
+
+ self.assertEqual(payload_total, payload_len_expected)
+
+    def payload(self, length):
+        return 'x' * length
+
+ def test_map_t(self):
+ """ MAP-T """
+
+ #
+ # Add a domain that maps from pg0 to pg1
+ #
+ map_dst = '2001:db8::/32'
+ map_src = '1234:5678:90ab:cdef::/64'
+ ip4_pfx = '192.168.0.0/24'
+ tag = 'MAP-T Tag.'
+
+ self.vapi.map_add_domain(ip6_prefix=map_dst,
+ ip4_prefix=ip4_pfx,
+ ip6_src=map_src,
+ ea_bits_len=16,
+ psid_offset=6,
+ psid_length=4,
+ mtu=1500,
+ tag=tag)
+
+ # Enable MAP-T on interfaces.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=1)
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=1)
+
+ # Ensure MAP doesn't steal all packets!
+ v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+ rx = self.send_and_expect(self.pg0, v4*1, self.pg0)
+ v4_reply = v4[1]
+ v4_reply.ttl -= 1
+ for p in rx:
+ self.validate(p[1], v4_reply)
+ # Ensure MAP doesn't steal all packets
+ v6 = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.pg1.remote_ip6) /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+ rx = self.send_and_expect(self.pg1, v6*1, self.pg1)
+ v6_reply = v6[1]
+ v6_reply.hlim -= 1
+ for p in rx:
+ self.validate(p[1], v6_reply)
+
+ map_route = VppIpRoute(self,
+ "2001:db8::",
+ 32,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ map_route.add_vpp_config()
+
+ #
+ # Send a v4 packet that will be translated
+ #
+ p_ether = Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+ p_ip4 = IP(src=self.pg0.remote_ip4, dst='192.168.0.1')
+ payload = TCP(sport=0xabcd, dport=0xabcd)
+
+ p4 = (p_ether / p_ip4 / payload)
+ p6_translated = (IPv6(src="1234:5678:90ab:cdef:ac:1001:200:0",
+ dst="2001:db8:1f0::c0a8:1:f") / payload)
+ p6_translated.hlim -= 1
+ rx = self.send_and_expect(self.pg0, p4*1, self.pg1)
+ for p in rx:
+ self.validate(p[1], p6_translated)
+
+ # Send back an IPv6 packet that will be "untranslated"
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6 = IPv6(src='2001:db8:1f0::c0a8:1:f',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / p_ip6 / payload)
+ p4_translated = (IP(src='192.168.0.1',
+ dst=self.pg0.remote_ip4) / payload)
+ p4_translated.id = 0
+ p4_translated.ttl -= 1
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg0)
+ for p in rx:
+ self.validate(p[1], p4_translated)
+
+ # IPv4 TTL=0
+ ip4_ttl_expired = IP(src=self.pg0.remote_ip4, dst='192.168.0.1', ttl=0)
+ p4 = (p_ether / ip4_ttl_expired / payload)
+
+ icmp4_reply = (IP(id=0, ttl=254, src=self.pg0.local_ip4,
+ dst=self.pg0.remote_ip4) /
+ ICMP(type='time-exceeded',
+ code='ttl-zero-during-transit') /
+ IP(src=self.pg0.remote_ip4,
+ dst='192.168.0.1', ttl=0) / payload)
+ rx = self.send_and_expect(self.pg0, p4*1, self.pg0)
+ for p in rx:
+ self.validate(p[1], icmp4_reply)
+
+ # IPv4 TTL=1
+ ip4_ttl_expired = IP(src=self.pg0.remote_ip4, dst='192.168.0.1', ttl=1)
+ p4 = (p_ether / ip4_ttl_expired / payload)
+
+ icmp4_reply = (IP(id=0, ttl=254, src=self.pg0.local_ip4,
+ dst=self.pg0.remote_ip4) /
+ ICMP(type='time-exceeded',
+ code='ttl-zero-during-transit') /
+ IP(src=self.pg0.remote_ip4,
+ dst='192.168.0.1', ttl=1) / payload)
+ rx = self.send_and_expect(self.pg0, p4*1, self.pg0)
+ for p in rx:
+ self.validate(p[1], icmp4_reply)
+
+ # IPv6 Hop limit at BR
+ ip6_hlim_expired = IPv6(hlim=1, src='2001:db8:1ab::c0a8:1:ab',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / ip6_hlim_expired / payload)
+
+ icmp6_reply = (IPv6(hlim=255, src=self.pg1.local_ip6,
+ dst="2001:db8:1ab::c0a8:1:ab") /
+ ICMPv6TimeExceeded(code=0) /
+ IPv6(src="2001:db8:1ab::c0a8:1:ab",
+ dst='1234:5678:90ab:cdef:ac:1001:200:0',
+ hlim=1) / payload)
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg1)
+ for p in rx:
+ self.validate(p[1], icmp6_reply)
+
+ # IPv6 Hop limit beyond BR
+ ip6_hlim_expired = IPv6(hlim=0, src='2001:db8:1ab::c0a8:1:ab',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / ip6_hlim_expired / payload)
+
+ icmp6_reply = (IPv6(hlim=255, src=self.pg1.local_ip6,
+ dst="2001:db8:1ab::c0a8:1:ab") /
+ ICMPv6TimeExceeded(code=0) /
+ IPv6(src="2001:db8:1ab::c0a8:1:ab",
+ dst='1234:5678:90ab:cdef:ac:1001:200:0',
+ hlim=0) / payload)
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg1)
+ for p in rx:
+ self.validate(p[1], icmp6_reply)
+
+ # IPv4 Well-known port
+ p_ip4 = IP(src=self.pg0.remote_ip4, dst='192.168.0.1')
+ payload = UDP(sport=200, dport=200)
+ p4 = (p_ether / p_ip4 / payload)
+ self.send_and_assert_no_replies(self.pg0, p4*1)
+
+ # IPv6 Well-known port
+ payload = UDP(sport=200, dport=200)
+ p6 = (p_ether6 / p_ip6 / payload)
+ self.send_and_assert_no_replies(self.pg1, p6*1)
+
+ # UDP packet fragmentation
+ payload_len = 1453
+ payload = UDP(sport=40000, dport=4000) / self.payload(payload_len)
+ p4 = (p_ether / p_ip4 / payload)
+ self.pg_enable_capture()
+ self.pg0.add_stream(p4)
+ self.pg_start()
+ rx = self.pg1.get_capture(2)
+
+ p_ip6_translated = IPv6(src='1234:5678:90ab:cdef:ac:1001:200:0',
+ dst='2001:db8:1e0::c0a8:1:e')
+ for p in rx:
+ self.validate_frag6(p, p_ip6_translated)
+
+ self.validate_frag_payload_len6(rx, UDP, payload_len)
+
+ # UDP packet fragmentation send fragments
+ payload_len = 1453
+ payload = UDP(sport=40000, dport=4000) / self.payload(payload_len)
+ p4 = (p_ether / p_ip4 / payload)
+ frags = fragment_rfc791(p4, fragsize=1000)
+ self.pg_enable_capture()
+ self.pg0.add_stream(frags)
+ self.pg_start()
+ rx = self.pg1.get_capture(2)
+
+ for p in rx:
+ self.validate_frag6(p, p_ip6_translated)
+
+ self.validate_frag_payload_len6(rx, UDP, payload_len)
+
+        # Send back a fragmented IPv6 UDP packet that will be "untranslated"
+ payload = UDP(sport=4000, dport=40000) / self.payload(payload_len)
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6 = IPv6(src='2001:db8:1e0::c0a8:1:e',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / p_ip6 / payload)
+ frags6 = fragment_rfc8200(p6, identification=0xdcba, fragsize=1000)
+
+ p_ip4_translated = IP(src='192.168.0.1', dst=self.pg0.remote_ip4)
+ p4_translated = (p_ip4_translated / payload)
+ p4_translated.id = 0
+ p4_translated.ttl -= 1
+
+ self.pg_enable_capture()
+ self.pg1.add_stream(frags6)
+ self.pg_start()
+ rx = self.pg0.get_capture(2)
+
+ for p in rx:
+ self.validate_frag4(p, p4_translated)
+
+ self.validate_frag_payload_len4(rx, UDP, payload_len)
+
+ # ICMP packet fragmentation
+ payload = ICMP(id=6529) / self.payload(payload_len)
+ p4 = (p_ether / p_ip4 / payload)
+ self.pg_enable_capture()
+ self.pg0.add_stream(p4)
+ self.pg_start()
+ rx = self.pg1.get_capture(2)
+
+ p_ip6_translated = IPv6(src='1234:5678:90ab:cdef:ac:1001:200:0',
+ dst='2001:db8:160::c0a8:1:6')
+ for p in rx:
+ self.validate_frag6(p, p_ip6_translated)
+
+ self.validate_frag_payload_len6(rx, ICMPv6EchoRequest, payload_len)
+
+ # ICMP packet fragmentation send fragments
+ payload = ICMP(id=6529) / self.payload(payload_len)
+ p4 = (p_ether / p_ip4 / payload)
+ frags = fragment_rfc791(p4, fragsize=1000)
+ self.pg_enable_capture()
+ self.pg0.add_stream(frags)
+ self.pg_start()
+ rx = self.pg1.get_capture(2)
+
+ for p in rx:
+ self.validate_frag6(p, p_ip6_translated)
+
+ self.validate_frag_payload_len6(rx, ICMPv6EchoRequest, payload_len)
+
+ # TCP MSS clamping
+ self.vapi.map_param_set_tcp(1300)
+
+ #
+ # Send a v4 TCP SYN packet that will be translated and MSS clamped
+ #
+ p_ether = Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+ p_ip4 = IP(src=self.pg0.remote_ip4, dst='192.168.0.1')
+ payload = TCP(sport=0xabcd, dport=0xabcd, flags="S",
+ options=[('MSS', 1460)])
+
+ p4 = (p_ether / p_ip4 / payload)
+ p6_translated = (IPv6(src="1234:5678:90ab:cdef:ac:1001:200:0",
+ dst="2001:db8:1f0::c0a8:1:f") / payload)
+ p6_translated.hlim -= 1
+ p6_translated[TCP].options = [('MSS', 1300)]
+ rx = self.send_and_expect(self.pg0, p4*1, self.pg1)
+ for p in rx:
+ self.validate(p[1], p6_translated)
+
+ # Send back an IPv6 packet that will be "untranslated"
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6 = IPv6(src='2001:db8:1f0::c0a8:1:f',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / p_ip6 / payload)
+ p4_translated = (IP(src='192.168.0.1',
+ dst=self.pg0.remote_ip4) / payload)
+ p4_translated.id = 0
+ p4_translated.ttl -= 1
+ p4_translated[TCP].options = [('MSS', 1300)]
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg0)
+ for p in rx:
+ self.validate(p[1], p4_translated)
+
+ # TCP MSS clamping cleanup
+ self.vapi.map_param_set_tcp(0)
+
+        # Enable the icmp6 param so that ICMPv6 unreachable messages are
+        # returned when the security check fails
+ self.vapi.map_param_set_icmp6(enable_unreachable=1)
+
+        # Send back an IPv6 packet that will be dropped due to a security
+        # check failure
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6_sec_check_fail = IPv6(src='2001:db8:1fe::c0a8:1:f',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ payload = TCP(sport=0xabcd, dport=0xabcd)
+ p6 = (p_ether6 / p_ip6_sec_check_fail / payload)
+
+ self.pg_send(self.pg1, p6*1)
+ self.pg0.get_capture(0, timeout=1)
+ rx = self.pg1.get_capture(1)
+
+ icmp6_reply = (IPv6(hlim=255, src=self.pg1.local_ip6,
+ dst='2001:db8:1fe::c0a8:1:f') /
+ ICMPv6DestUnreach(code=5) /
+ p_ip6_sec_check_fail / payload)
+
+ for p in rx:
+ self.validate(p[1], icmp6_reply)
+
+ # ICMPv6 unreachable messages cleanup
+ self.vapi.map_param_set_icmp6(enable_unreachable=0)
+
+ def test_map_t_ip6_psid(self):
+ """ MAP-T v6->v4 PSID validation"""
+
+ #
+ # Add a domain that maps from pg0 to pg1
+ #
+ map_dst = '2001:db8::/32'
+ map_src = '1234:5678:90ab:cdef::/64'
+ ip4_pfx = '192.168.0.0/24'
+ tag = 'MAP-T Test Domain'
+
+ self.vapi.map_add_domain(ip6_prefix=map_dst,
+ ip4_prefix=ip4_pfx,
+ ip6_src=map_src,
+ ea_bits_len=16,
+ psid_offset=6,
+ psid_length=4,
+ mtu=1500,
+ tag=tag)
+
+ # Enable MAP-T on interfaces.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=1)
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=1)
+
+ map_route = VppIpRoute(self,
+ "2001:db8::",
+ 32,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ map_route.add_vpp_config()
+
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6 = IPv6(src='2001:db8:1f0::c0a8:1:f',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+
+ # Send good IPv6 source port, ensure translated IPv4 received
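+        # (with psid_offset 6 and psid_length 4, sport 0xabcd carries
+        # PSID 0xf, which matches the PSID embedded in the IPv6 source
+        # address; sport 0xdcba used below carries PSID 0x2 instead)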
+ payload = TCP(sport=0xabcd, dport=80)
+ p6 = (p_ether6 / p_ip6 / payload)
+ p4_translated = (IP(src='192.168.0.1',
+ dst=self.pg0.remote_ip4) / payload)
+ p4_translated.id = 0
+ p4_translated.ttl -= 1
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg0)
+ for p in rx:
+ self.validate(p[1], p4_translated)
+
+ # Send bad IPv6 source port, ensure translated IPv4 not received
+ payload = TCP(sport=0xdcba, dport=80)
+ p6 = (p_ether6 / p_ip6 / payload)
+ self.send_and_assert_no_replies(self.pg1, p6*1)
+
+ def test_map_t_pre_resolve(self):
+ """ MAP-T pre-resolve"""
+
+ # Add a domain that maps from pg0 to pg1
+ map_dst = '2001:db8::/32'
+ map_src = '1234:5678:90ab:cdef::/64'
+ ip4_pfx = '192.168.0.0/24'
+ tag = 'MAP-T Test Domain.'
+
+ self.vapi.map_add_domain(ip6_prefix=map_dst,
+ ip4_prefix=ip4_pfx,
+ ip6_src=map_src,
+ ea_bits_len=16,
+ psid_offset=6,
+ psid_length=4,
+ mtu=1500,
+ tag=tag)
+
+ # Enable MAP-T on interfaces.
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=1)
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=1)
+
+ # Enable pre-resolve option
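+        # With pre-resolve configured, translated IPv6 traffic is expected
+        # to leave via the 4001::1 next-hop and "untranslated" IPv4 traffic
+        # via 10.1.2.3; this is verified against the neighbour MACs below.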
+ self.vapi.map_param_add_del_pre_resolve(ip4_nh_address="10.1.2.3",
+ ip6_nh_address="4001::1",
+ is_add=1)
+
+ # Add a route to 4001::1 and expect the translated traffic to be
+ # sent via that route next-hop.
+ pre_res_route6 = VppIpRoute(self, "4001::1", 128,
+ [VppRoutePath(self.pg1.remote_hosts[2].ip6,
+ self.pg1.sw_if_index)])
+ pre_res_route6.add_vpp_config()
+
+ # Add a route to 10.1.2.3 and expect the "untranslated" traffic to be
+ # sent via that route next-hop.
+ pre_res_route4 = VppIpRoute(self, "10.1.2.3", 32,
+ [VppRoutePath(self.pg0.remote_hosts[1].ip4,
+ self.pg0.sw_if_index)])
+ pre_res_route4.add_vpp_config()
+
+ # Send an IPv4 packet that will be translated
+ p_ether = Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)
+ p_ip4 = IP(src=self.pg0.remote_ip4, dst='192.168.0.1')
+ payload = TCP(sport=0xabcd, dport=0xabcd)
+ p4 = (p_ether / p_ip4 / payload)
+
+ p6_translated = (IPv6(src="1234:5678:90ab:cdef:ac:1001:200:0",
+ dst="2001:db8:1f0::c0a8:1:f") / payload)
+ p6_translated.hlim -= 1
+
+ rx = self.send_and_expect(self.pg0, p4*1, self.pg1)
+ for p in rx:
+ self.assertEqual(p[Ether].dst, self.pg1.remote_hosts[2].mac)
+ self.validate(p[1], p6_translated)
+
+ # Send back an IPv6 packet that will be "untranslated"
+ p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)
+ p_ip6 = IPv6(src='2001:db8:1f0::c0a8:1:f',
+ dst='1234:5678:90ab:cdef:ac:1001:200:0')
+ p6 = (p_ether6 / p_ip6 / payload)
+
+ p4_translated = (IP(src='192.168.0.1',
+ dst=self.pg0.remote_ip4) / payload)
+ p4_translated.id = 0
+ p4_translated.ttl -= 1
+
+ rx = self.send_and_expect(self.pg1, p6*1, self.pg0)
+ for p in rx:
+ self.assertEqual(p[Ether].dst, self.pg0.remote_hosts[1].mac)
+ self.validate(p[1], p4_translated)
+
+ # Cleanup pre-resolve option
+ self.vapi.map_param_add_del_pre_resolve(ip4_nh_address="10.1.2.3",
+ ip6_nh_address="4001::1",
+ is_add=0)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_map_br.py b/test/test_map_br.py
new file mode 100644
index 00000000000..3602ddd2e31
--- /dev/null
+++ b/test/test_map_br.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python3
+
+import ipaddress
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from util import fragment_rfc791, fragment_rfc8200
+
+import scapy.compat
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP, ICMP, TCP, IPerror, UDPerror
+from scapy.layers.inet6 import IPv6, ICMPv6TimeExceeded, ICMPv6PacketTooBig
+from scapy.layers.inet6 import ICMPv6EchoRequest, ICMPv6EchoReply, IPerror6
+
+
+class TestMAPBR(VppTestCase):
+ """ MAP-T Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMAPBR, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestMAPBR, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestMAPBR, self).setUp()
+
+ #
+ # Create 2 pg interfaces.
+ # pg0 is IPv4
+ # pg1 is IPv6
+ #
+ self.create_pg_interfaces(range(2))
+
+ self.pg0.admin_up()
+ self.pg0.config_ip4()
+        self.pg0.generate_remote_hosts(20)
+        self.pg0.configure_ipv4_neighbors()
+ self.pg0.resolve_arp()
+
+ self.pg1.admin_up()
+ self.pg1.config_ip6()
+ self.pg1.generate_remote_hosts(20)
+ self.pg1.configure_ipv6_neighbors()
+
+ #
+        # BR configuration parameters used for all tests.
+ #
+ self.ip4_prefix = '198.18.0.0/24'
+ self.ip6_prefix = '2001:db8:f0::/48'
+ self.ip6_src = '2001:db8:ffff:ff00::/64'
+ self.ea_bits_len = 12
+ self.psid_offset = 6
+ self.psid_length = 4
+ self.mtu = 1500
+ self.tag = 'MAP-T BR'
+
+ self.ipv4_internet_address = self.pg0.remote_ip4
+ self.ipv4_map_address = "198.18.0.12"
+ self.ipv4_udp_or_tcp_internet_port = 65000
+ self.ipv4_udp_or_tcp_map_port = 16606
+
+ self.ipv6_cpe_address = "2001:db8:f0:c30:0:c612:c:3" # 198.18.0.12
+ self.ipv6_spoof_address = "2001:db8:f0:c30:0:c612:1c:3" # 198.18.0.28
+ self.ipv6_spoof_prefix = "2001:db8:f0:c30:0:a00:c:3" # 10.0.0.12
+ self.ipv6_spoof_psid = "2001:db8:f0:c30:0:c612:c:4" # 4
+ self.ipv6_spoof_subnet = "2001:db8:f1:c30:0:c612:c:3" # f1
+
+ self.ipv6_udp_or_tcp_internet_port = 65000
+ self.ipv6_udp_or_tcp_map_port = 16606
+ self.ipv6_udp_or_tcp_spoof_port = 16862
+
+        self.ipv6_map_address = (
+            "2001:db8:ffff:ff00:ac:1001:200:0")  # 172.16.1.2
+ self.ipv6_map_same_rule_diff_addr = (
+ "2001:db8:ffff:ff00:c6:1200:1000:0") # 198.18.0.16
+ self.ipv6_map_same_rule_same_addr = (
+ "2001:db8:ffff:ff00:c6:1200:c00:0") # 198.18.0.12
+
+ self.map_br_prefix = "2001:db8:f0::"
+ self.map_br_prefix_len = 48
+ self.psid_number = 3
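+        # The values above follow the MAP address format (RFC 7597):
+        # for 198.18.0.12 with PSID 3 the 12 EA bits are the IPv4 suffix
+        # 0x0c plus PSID 0x3, giving the end-user prefix
+        # 2001:db8:f0:c30::/60, and the interface-id embeds the full IPv4
+        # address and PSID, i.e. 2001:db8:f0:c30:0:c612:c:3
+        # (0xc612000c == 198.18.0.12).  With psid_offset 6 and
+        # psid_length 4, port 16606 (0x40de) carries PSID 3 in bits 6..9
+        # of the port (counting from the MSB).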
+
+ #
+ # Add an IPv6 route to the MAP-BR.
+ #
+ map_route = VppIpRoute(self,
+ self.map_br_prefix,
+ self.map_br_prefix_len,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ map_route.add_vpp_config()
+
+ #
+ # Add a MAP BR domain that maps from pg0 to pg1.
+ #
+ self.vapi.map_add_domain(ip4_prefix=self.ip4_prefix,
+ ip6_prefix=self.ip6_prefix,
+ ip6_src=self.ip6_src,
+ ea_bits_len=self.ea_bits_len,
+ psid_offset=self.psid_offset,
+ psid_length=self.psid_length,
+ mtu=self.mtu,
+ tag=self.tag)
+
+ #
+ # Set BR parameters.
+ #
+ self.vapi.map_param_set_fragmentation(inner=1, ignore_df=0)
+ self.vapi.map_param_set_fragmentation(inner=0, ignore_df=0)
+ self.vapi.map_param_set_icmp(ip4_err_relay_src=self.pg0.local_ip4)
+ self.vapi.map_param_set_traffic_class(copy=1)
+
+ #
+ # Enable MAP-T on interfaces.
+ #
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg0.sw_if_index,
+ is_translation=1)
+
+ self.vapi.map_if_enable_disable(is_enable=1,
+ sw_if_index=self.pg1.sw_if_index,
+ is_translation=1)
+
+ def tearDown(self):
+ super(TestMAPBR, self).tearDown()
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+
+ def v4_address_check(self, pkt):
+ self.assertEqual(pkt[IP].src, self.ipv4_map_address)
+ self.assertEqual(pkt[IP].dst, self.ipv4_internet_address)
+
+ def v4_port_check(self, pkt, proto):
+ self.assertEqual(pkt[proto].sport, self.ipv4_udp_or_tcp_map_port)
+ self.assertEqual(pkt[proto].dport, self.ipv4_udp_or_tcp_internet_port)
+
+ def v6_address_check(self, pkt):
+ self.assertEqual(pkt[IPv6].src, self.ipv6_map_address)
+ self.assertEqual(pkt[IPv6].dst, self.ipv6_cpe_address)
+
+ def v6_port_check(self, pkt, proto):
+ self.assertEqual(pkt[proto].sport, self.ipv6_udp_or_tcp_internet_port)
+ self.assertEqual(pkt[proto].dport, self.ipv6_udp_or_tcp_map_port)
+
+ #
+ # Normal translation of UDP packets v4 -> v6 direction
+ # Send 128 frame size packet for IPv4/UDP.
+ # Received packet should be translated into IPv6 packet with no
+ # fragment header.
+ #
+
+ def test_map_t_udp_ip4_to_ip6(self):
+ """ MAP-T UDP IPv4 -> IPv6 """
+
+ eth = Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac)
+ ip = IP(src=self.pg0.remote_ip4,
+ dst=self.ipv4_map_address,
+ tos=0)
+ udp = UDP(sport=self.ipv4_udp_or_tcp_internet_port,
+ dport=self.ipv4_udp_or_tcp_map_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg0, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v6_address_check(rx_pkt)
+ self.v6_port_check(rx_pkt, UDP)
+ self.assertEqual(rx_pkt[IPv6].tc, 0) # IPv4 ToS passed to v6 TC
+ self.assertEqual(rx_pkt[IPv6].nh, IPv6(nh="UDP").nh)
+
+ #
+ # Normal translation of TCP packets v4 -> v6 direction.
+ # Send 128 frame size packet for IPv4/TCP.
+ # Received packet should be translated into IPv6 packet with no
+ # fragment header.
+ #
+
+ def test_map_t_tcp_ip4_to_ip6(self):
+ """ MAP-T TCP IPv4 -> IPv6 """
+
+ eth = Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac)
+ ip = IP(src=self.pg0.remote_ip4,
+ dst=self.ipv4_map_address,
+ tos=0)
+ tcp = TCP(sport=self.ipv4_udp_or_tcp_internet_port,
+ dport=self.ipv4_udp_or_tcp_map_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / tcp / payload
+
+ self.pg_send(self.pg0, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v6_address_check(rx_pkt)
+ self.v6_port_check(rx_pkt, TCP)
+ self.assertEqual(rx_pkt[IPv6].tc, 0) # IPv4 ToS passed to v6 TC
+ self.assertEqual(rx_pkt[IPv6].nh, IPv6(nh="TCP").nh)
+
+ #
+ # Normal translation of UDP packets v6 -> v4 direction
+ # Send 128 frame size packet for IPv6/UDP.
+    # Received packet should be translated into an IPv4 packet; the test
+    # checks that the DF bit is not set.
+ #
+
+ def test_map_t_udp_ip6_to_ip4(self):
+ """ MAP-T UDP IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v4_address_check(rx_pkt)
+ self.v4_port_check(rx_pkt, UDP)
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="udp").proto)
+ self.assertEqual(rx_pkt[IP].tos, 0) # IPv6 TC passed to v4 ToS
+ df_bit = IP(flags="DF").flags
+ self.assertNotEqual(rx_pkt[IP].flags & df_bit, df_bit)
+
+ #
+ # Normal translation of TCP packets v6 -> v4 direction
+ # Send 128 frame size packet for IPv6/TCP.
+    # Received packet should be translated into an IPv4 packet; the test
+    # checks that the DF bit is not set.
+ #
+
+ def test_map_t_tcp_ip6_to_ip4(self):
+ """ MAP-T TCP IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ tcp = TCP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / tcp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v4_address_check(rx_pkt)
+ self.v4_port_check(rx_pkt, TCP)
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="tcp").proto)
+ self.assertEqual(rx_pkt[IP].tos, 0) # IPv6 TC passed to v4 ToS
+ df_bit = IP(flags="DF").flags
+ self.assertNotEqual(rx_pkt[IP].flags & df_bit, df_bit)
+
+ #
+ # Translation of ICMP Echo Request v4 -> v6 direction
+ # Received packet should be translated into an IPv6 Echo Request.
+ #
+
+ def test_map_t_echo_request_ip4_to_ip6(self):
+ """ MAP-T echo request IPv4 -> IPv6 """
+
+        eth = Ether(src=self.pg0.remote_mac,
+                    dst=self.pg0.local_mac)
+ ip = IP(src=self.pg0.remote_ip4,
+ dst=self.ipv4_map_address)
+ icmp = ICMP(type="echo-request",
+ id=self.ipv6_udp_or_tcp_map_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / payload
+
+ self.pg_send(self.pg0, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.assertEqual(rx_pkt[IPv6].nh, IPv6(nh="ICMPv6").nh)
+ self.assertEqual(rx_pkt[ICMPv6EchoRequest].type,
+ ICMPv6EchoRequest(type="Echo Request").type)
+ self.assertEqual(rx_pkt[ICMPv6EchoRequest].code, 0)
+ self.assertEqual(rx_pkt[ICMPv6EchoRequest].id,
+ self.ipv6_udp_or_tcp_map_port)
+
+ #
+ # Translation of ICMP Echo Reply v4 -> v6 direction
+ # Received packet should be translated into an IPv6 Echo Reply.
+ #
+
+ def test_map_t_echo_reply_ip4_to_ip6(self):
+ """ MAP-T echo reply IPv4 -> IPv6 """
+
+        eth = Ether(src=self.pg0.remote_mac,
+                    dst=self.pg0.local_mac)
+ ip = IP(src=self.pg0.remote_ip4,
+ dst=self.ipv4_map_address)
+ icmp = ICMP(type="echo-reply",
+ id=self.ipv6_udp_or_tcp_map_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / payload
+
+ self.pg_send(self.pg0, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.assertEqual(rx_pkt[IPv6].nh, IPv6(nh="ICMPv6").nh)
+ self.assertEqual(rx_pkt[ICMPv6EchoReply].type,
+ ICMPv6EchoReply(type="Echo Reply").type)
+ self.assertEqual(rx_pkt[ICMPv6EchoReply].code, 0)
+ self.assertEqual(rx_pkt[ICMPv6EchoReply].id,
+ self.ipv6_udp_or_tcp_map_port)
+
+ #
+ # Translation of ICMP Time Exceeded v4 -> v6 direction
+ # Received packet should be translated into an IPv6 Time Exceeded.
+ #
+
+ def test_map_t_time_exceeded_ip4_to_ip6(self):
+ """ MAP-T time exceeded IPv4 -> IPv6 """
+
+ eth = Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac)
+ ip = IP(src=self.pg0.remote_ip4,
+ dst=self.ipv4_map_address)
+ icmp = ICMP(type="time-exceeded", code="ttl-zero-during-transit")
+ ip_inner = IP(dst=self.pg0.remote_ip4,
+ src=self.ipv4_map_address, ttl=1)
+ udp_inner = UDP(sport=self.ipv4_udp_or_tcp_map_port,
+ dport=self.ipv4_udp_or_tcp_internet_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / ip_inner / udp_inner / payload
+
+ self.pg_send(self.pg0, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v6_address_check(rx_pkt)
+ self.assertEqual(rx_pkt[IPv6].nh, IPv6(nh="ICMPv6").nh)
+ self.assertEqual(rx_pkt[ICMPv6TimeExceeded].type,
+ ICMPv6TimeExceeded().type)
+ self.assertEqual(rx_pkt[ICMPv6TimeExceeded].code,
+ ICMPv6TimeExceeded(
+ code="hop limit exceeded in transit").code)
+ self.assertEqual(rx_pkt[ICMPv6TimeExceeded].hlim, tx_pkt[IP][1].ttl)
+ self.assertTrue(rx_pkt.haslayer(IPerror6))
+ self.assertTrue(rx_pkt.haslayer(UDPerror))
+ self.assertEqual(rx_pkt[IPv6].src, rx_pkt[IPerror6].dst)
+ self.assertEqual(rx_pkt[IPv6].dst, rx_pkt[IPerror6].src)
+ self.assertEqual(rx_pkt[UDPerror].sport, self.ipv6_udp_or_tcp_map_port)
+ self.assertEqual(rx_pkt[UDPerror].dport,
+ self.ipv6_udp_or_tcp_internet_port)
+
+ #
+ # Translation of ICMP Echo Request v6 -> v4 direction
+ # Received packet should be translated into an IPv4 Echo Request.
+ #
+
+ def test_map_t_echo_request_ip6_to_ip4(self):
+ """ MAP-T echo request IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ icmp = ICMPv6EchoRequest()
+ icmp.id = self.ipv6_udp_or_tcp_map_port
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="icmp").proto)
+ self.assertEqual(rx_pkt[ICMP].type, ICMP(type="echo-request").type)
+ self.assertEqual(rx_pkt[ICMP].code, 0)
+ self.assertEqual(rx_pkt[ICMP].id, self.ipv6_udp_or_tcp_map_port)
+
+ #
+ # Translation of ICMP Echo Reply v6 -> v4 direction
+ # Received packet should be translated into an IPv4 Echo Reply.
+ #
+
+ def test_map_t_echo_reply_ip6_to_ip4(self):
+ """ MAP-T echo reply IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ icmp = ICMPv6EchoReply(id=self.ipv6_udp_or_tcp_map_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="icmp").proto)
+ self.assertEqual(rx_pkt[ICMP].type, ICMP(type="echo-reply").type)
+ self.assertEqual(rx_pkt[ICMP].code, 0)
+ self.assertEqual(rx_pkt[ICMP].id, self.ipv6_udp_or_tcp_map_port)
+
+ #
+ # Translation of ICMP Packet Too Big v6 -> v4 direction
+ # Received packet should be translated into an IPv4 Dest Unreachable.
+ #
+
+ def test_map_t_packet_too_big_ip6_to_ip4(self):
+ """ MAP-T packet too big IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ icmp = ICMPv6PacketTooBig(mtu=1280)
+ ip_inner = IPv6(src=self.ipv6_map_address,
+ dst=self.ipv6_cpe_address)
+ udp_inner = UDP(sport=self.ipv6_udp_or_tcp_internet_port,
+ dport=self.ipv6_udp_or_tcp_map_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / ip_inner / udp_inner / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v4_address_check(rx_pkt)
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="icmp").proto)
+ self.assertEqual(rx_pkt[ICMP].type, ICMP(type="dest-unreach").type)
+ self.assertEqual(rx_pkt[ICMP].code,
+ ICMP(code="fragmentation-needed").code)
+ self.assertEqual(rx_pkt[ICMP].nexthopmtu,
+ tx_pkt[ICMPv6PacketTooBig].mtu - 20)
+ self.assertTrue(rx_pkt.haslayer(IPerror))
+ self.assertTrue(rx_pkt.haslayer(UDPerror))
+ self.assertEqual(rx_pkt[IP].src, rx_pkt[IPerror].dst)
+ self.assertEqual(rx_pkt[IP].dst, rx_pkt[IPerror].src)
+ self.assertEqual(rx_pkt[UDPerror].sport,
+ self.ipv4_udp_or_tcp_internet_port)
+ self.assertEqual(rx_pkt[UDPerror].dport, self.ipv4_udp_or_tcp_map_port)
+
+ #
+ # Translation of ICMP Time Exceeded v6 -> v4 direction
+ # Received packet should be translated into an IPv4 Time Exceeded.
+ #
+
+ def test_map_t_time_exceeded_ip6_to_ip4(self):
+ """ MAP-T time exceeded IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ icmp = ICMPv6TimeExceeded()
+ ip_inner = IPv6(src=self.ipv6_map_address,
+ dst=self.ipv6_cpe_address, hlim=1)
+ udp_inner = UDP(sport=self.ipv6_udp_or_tcp_internet_port,
+ dport=self.ipv6_udp_or_tcp_map_port)
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / ip_inner / udp_inner / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg0.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ self.v4_address_check(rx_pkt)
+ self.assertEqual(rx_pkt[IP].proto, IP(proto="icmp").proto)
+ self.assertEqual(rx_pkt[ICMP].type, ICMP(type="time-exceeded").type)
+ self.assertEqual(rx_pkt[ICMP].code,
+ ICMP(code="ttl-zero-during-transit").code)
+ self.assertEqual(rx_pkt[ICMP].ttl, tx_pkt[IPv6][1].hlim)
+ self.assertTrue(rx_pkt.haslayer(IPerror))
+ self.assertTrue(rx_pkt.haslayer(UDPerror))
+ self.assertEqual(rx_pkt[IP].src, rx_pkt[IPerror].dst)
+ self.assertEqual(rx_pkt[IP].dst, rx_pkt[IPerror].src)
+ self.assertEqual(rx_pkt[UDPerror].sport,
+ self.ipv4_udp_or_tcp_internet_port)
+ self.assertEqual(rx_pkt[UDPerror].dport, self.ipv4_udp_or_tcp_map_port)
+
+ #
+ # Spoofed IPv4 Source Address v6 -> v4 direction
+ # Send a packet with a wrong IPv4 address embedded in bits 72-103.
+ # The BR should either drop the packet, or rewrite the spoofed
+ # source IPv4 as the actual source IPv4 address.
+ # The BR really should drop the packet.
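+    # Here the source address embeds 198.18.0.28 while its EA bits still
+    # encode 198.18.0.12, so the source consistency check should fail.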
+ #
+
+ def test_map_t_spoof_ipv4_src_addr_ip6_to_ip4(self):
+ """ MAP-T spoof ipv4 src addr IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_spoof_address,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+ self.pg0.assert_nothing_captured("Should drop IPv4 spoof address")
+
+ #
+ # Spoofed IPv4 Source Prefix v6 -> v4 direction
+ # Send a packet with a wrong IPv4 prefix embedded in bits 72-103.
+ # The BR should either drop the packet, or rewrite the source IPv4
+ # to the prefix that matches the source IPv4 address.
+ #
+
+ def test_map_t_spoof_ipv4_src_prefix_ip6_to_ip4(self):
+ """ MAP-T spoof ipv4 src prefix IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_spoof_prefix,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+ self.pg0.assert_nothing_captured("Should drop IPv4 spoof prefix")
+
+ #
+ # Spoofed IPv6 PSID v6 -> v4 direction
+    # Send a packet with a wrong PSID embedded in the IPv6 source address.
+ # The BR should drop the packet.
+ #
+
+ def test_map_t_spoof_psid_ip6_to_ip4(self):
+ """ MAP-T spoof psid IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_spoof_psid,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+ self.pg0.assert_nothing_captured("Should drop IPv6 spoof PSID")
+
+ #
+ # Spoofed IPv6 subnet field v6 -> v4 direction
+    # Send a packet with a wrong IPv6 subnet, "2001:db8:f1" instead of
+    # "2001:db8:f0".
+ # The BR should drop the packet.
+ #
+
+ def test_map_t_spoof_subnet_ip6_to_ip4(self):
+ """ MAP-T spoof subnet IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_spoof_subnet,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+ self.pg0.assert_nothing_captured("Should drop IPv6 spoof subnet")
+
+ #
+ # Spoofed IPv6 port PSID v6 -> v4 direction
+ # Send a packet with a wrong IPv6 port PSID
+ # The BR should drop the packet.
+ #
+
+ def test_map_t_spoof_port_psid_ip6_to_ip4(self):
+ """ MAP-T spoof port psid IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_spoof_port,
+ dport=self.ipv6_udp_or_tcp_internet_port)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+ self.pg0.assert_nothing_captured("Should drop IPv6 spoof port PSID")
+
+ #
+ # Spoofed IPv6 ICMP ID PSID v6 -> v4 direction
+    # Send a packet with a wrong IPv6 ICMP ID PSID
+ # The BR should drop the packet.
+ #
+
+ def test_map_t_spoof_icmp_id_psid_ip6_to_ip4(self):
+ """ MAP-T spoof ICMP id psid IPv6 -> IPv4 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_address)
+ icmp = ICMPv6EchoRequest()
+ icmp.id = self.ipv6_udp_or_tcp_spoof_port
+ payload = "H" * 10
+ tx_pkt = eth / ip / icmp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ self.pg0.get_capture(0, timeout=1)
+        self.pg0.assert_nothing_captured(
+            "Should drop IPv6 spoof ICMP id PSID")
+
+ #
+ # Map to Map - same rule, different address
+ #
+
+ @unittest.skip("Fixme: correct behavior needs clarification")
+ def test_map_t_same_rule_diff_addr_ip6_to_ip4(self):
+ """ MAP-T same rule, diff addr IPv6 -> IPv6 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_same_rule_diff_addr)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=1025)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+ #
+ # Map to Map - same rule, same address
+ #
+
+ @unittest.skip("Fixme: correct behavior needs clarification")
+ def test_map_t_same_rule_same_addr_ip6_to_ip4(self):
+ """ MAP-T same rule, same addr IPv6 -> IPv6 """
+
+ eth = Ether(src=self.pg1.remote_mac,
+ dst=self.pg1.local_mac)
+ ip = IPv6(src=self.ipv6_cpe_address,
+ dst=self.ipv6_map_same_rule_same_addr)
+ udp = UDP(sport=self.ipv6_udp_or_tcp_map_port,
+ dport=1025)
+ payload = "a" * 82
+ tx_pkt = eth / ip / udp / payload
+
+ self.pg_send(self.pg1, tx_pkt * 1)
+
+ rx_pkts = self.pg1.get_capture(1)
+ rx_pkt = rx_pkts[0]
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_memif.py b/test/test_memif.py
new file mode 100644
index 00000000000..fc7cf9b2e7e
--- /dev/null
+++ b/test/test_memif.py
@@ -0,0 +1,308 @@
+import socket
+import unittest
+
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, ICMP
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from framework import tag_run_solo
+from remote_test import RemoteClass, RemoteVppTestCase
+from vpp_memif import remove_all_memif_vpp_config, \
+ VppSocketFilename, VppMemif
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_papi import VppEnum
+
+
+@tag_run_solo
+class TestMemif(VppTestCase):
+ """ Memif Test Case """
+ remote_class = RemoteVppTestCase
+
+ @classmethod
+ def get_cpus_required(cls):
+ return (super().get_cpus_required() +
+ cls.remote_class.get_cpus_required())
+
+ @classmethod
+ def assign_cpus(cls, cpus):
+ remote_cpus = cpus[:cls.remote_class.get_cpus_required()]
+ my_cpus = cpus[cls.remote_class.get_cpus_required():]
+ cls.remote_class.assign_cpus(remote_cpus)
+ super().assign_cpus(my_cpus)
+
+ @classmethod
+ def setUpClass(cls):
+ # fork new process before client connects to VPP
+ cls.remote_test = RemoteClass(cls.remote_class)
+ cls.remote_test.start_remote()
+ cls.remote_test.set_request_timeout(10)
+ super(TestMemif, cls).setUpClass()
+ cls.remote_test.setUpClass(cls.tempdir)
+ cls.create_pg_interfaces(range(1))
+ for pg in cls.pg_interfaces:
+ pg.config_ip4()
+ pg.admin_up()
+ pg.resolve_arp()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.remote_test.tearDownClass()
+ cls.remote_test.quit_remote()
+ for pg in cls.pg_interfaces:
+ pg.unconfig_ip4()
+ pg.set_table_ip4(0)
+ pg.admin_down()
+ super(TestMemif, cls).tearDownClass()
+
+ def tearDown(self):
+ remove_all_memif_vpp_config(self)
+ remove_all_memif_vpp_config(self.remote_test)
+ super(TestMemif, self).tearDown()
+
+ def _check_socket_filename(self, dump, socket_id, filename):
+ for d in dump:
+ if (d.socket_id == socket_id) and (
+ d.socket_filename == filename):
+ return True
+ return False
+
+ def test_memif_socket_filename_add_del(self):
+ """ Memif socket filename add/del """
+
+ # dump default socket filename
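+        # (socket_id 0 is the default memif socket, memif.sock in the
+        # test tempdir)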
+ dump = self.vapi.memif_socket_filename_dump()
+ self.assertTrue(
+ self._check_socket_filename(
+ dump, 0, "%s/memif.sock" % self.tempdir))
+
+ memif_sockets = []
+ # existing path
+ memif_sockets.append(
+ VppSocketFilename(
+ self, 1, "%s/memif1.sock" % self.tempdir))
+ # default path (test tempdir)
+ memif_sockets.append(
+ VppSocketFilename(
+ self,
+ 2,
+ "memif2.sock",
+ add_default_folder=True))
+ # create new folder in default folder
+ memif_sockets.append(
+ VppSocketFilename(
+ self,
+ 3,
+ "sock/memif3.sock",
+ add_default_folder=True))
+
+ for sock in memif_sockets:
+ sock.add_vpp_config()
+ dump = sock.query_vpp_config()
+ self.assertTrue(
+ self._check_socket_filename(
+ dump,
+ sock.socket_id,
+ sock.socket_filename))
+
+ for sock in memif_sockets:
+ sock.remove_vpp_config()
+
+ dump = self.vapi.memif_socket_filename_dump()
+ self.assertTrue(
+ self._check_socket_filename(
+ dump, 0, "%s/memif.sock" % self.tempdir))
+
+ def _create_delete_test_one_interface(self, memif):
+ memif.add_vpp_config()
+
+ dump = memif.query_vpp_config()
+
+ self.assertTrue(dump)
+ self.assertEqual(dump.sw_if_index, memif.sw_if_index)
+ self.assertEqual(dump.role, memif.role)
+ self.assertEqual(dump.mode, memif.mode)
+ if (memif.socket_id is not None):
+ self.assertEqual(dump.socket_id, memif.socket_id)
+
+ memif.remove_vpp_config()
+
+ dump = memif.query_vpp_config()
+
+ self.assertFalse(dump)
+
+ def _connect_test_one_interface(self, memif):
+ self.assertTrue(memif.wait_for_link_up(5))
+ dump = memif.query_vpp_config()
+
+ if memif.role == VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE:
+ self.assertEqual(dump.ring_size, memif.ring_size)
+ self.assertEqual(dump.buffer_size, memif.buffer_size)
+ else:
+ self.assertEqual(dump.ring_size, 1)
+ self.assertEqual(dump.buffer_size, 0)
+
+ def _connect_test_interface_pair(self, memif0, memif1):
+ memif0.add_vpp_config()
+ memif1.add_vpp_config()
+
+ memif0.admin_up()
+ memif1.admin_up()
+
+ self._connect_test_one_interface(memif0)
+ self._connect_test_one_interface(memif1)
+
+ memif0.remove_vpp_config()
+ memif1.remove_vpp_config()
+
+ def test_memif_create_delete(self):
+ """ Memif create/delete interface """
+
+ memif = VppMemif(
+ self,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET)
+ self._create_delete_test_one_interface(memif)
+ memif.role = VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER
+ self._create_delete_test_one_interface(memif)
+
+ def test_memif_create_custom_socket(self):
+ """ Memif create with non-default socket filename """
+
+ memif_sockets = []
+ # existing path
+ memif_sockets.append(
+ VppSocketFilename(
+ self, 1, "%s/memif1.sock" % self.tempdir))
+ # default path (test tempdir)
+ memif_sockets.append(
+ VppSocketFilename(
+ self,
+ 2,
+ "memif2.sock",
+ add_default_folder=True))
+ # create new folder in default folder
+ memif_sockets.append(
+ VppSocketFilename(
+ self,
+ 3,
+ "sock/memif3.sock",
+ add_default_folder=True))
+
+ memif = VppMemif(
+ self,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET)
+
+ for sock in memif_sockets:
+ sock.add_vpp_config()
+ memif.socket_id = sock.socket_id
+ memif.role = VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE
+ self._create_delete_test_one_interface(memif)
+ memif.role = VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER
+ self._create_delete_test_one_interface(memif)
+
+ def test_memif_connect(self):
+ """ Memif connect """
+ memif = VppMemif(
+ self,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ ring_size=1024,
+ buffer_size=2048,
+ secret="abc")
+
+ remote_socket = VppSocketFilename(self.remote_test, 1,
+ "%s/memif.sock" % self.tempdir)
+ remote_socket.add_vpp_config()
+
+ remote_memif = VppMemif(
+ self.remote_test,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=1,
+ ring_size=1024,
+ buffer_size=2048,
+ secret="abc")
+
+ self._connect_test_interface_pair(memif, remote_memif)
+
+ memif.role = VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER
+ remote_memif.role = VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE
+
+ self._connect_test_interface_pair(memif, remote_memif)
+
+ def _create_icmp(self, pg, memif, num):
+ pkts = []
+ for i in range(num):
+ pkt = (Ether(dst=pg.local_mac, src=pg.remote_mac) /
+ IP(src=pg.remote_ip4,
+ dst=str(memif.ip_prefix.network_address)) /
+ ICMP(id=memif.if_id, type='echo-request', seq=i))
+ pkts.append(pkt)
+ return pkts
+
+ def _verify_icmp(self, pg, memif, rx, seq):
+ ip = rx[IP]
+ self.assertEqual(ip.src, str(memif.ip_prefix.network_address))
+ self.assertEqual(ip.dst, pg.remote_ip4)
+ self.assertEqual(ip.proto, 1)
+ icmp = rx[ICMP]
+ self.assertEqual(icmp.type, 0) # echo-reply
+ self.assertEqual(icmp.id, memif.if_id)
+ self.assertEqual(icmp.seq, seq)
+
+ def test_memif_ping(self):
+ """ Memif ping """
+
+ memif = VppMemif(
+ self,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_SLAVE,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET)
+
+ remote_socket = VppSocketFilename(self.remote_test, 1,
+ "%s/memif.sock" % self.tempdir)
+ remote_socket.add_vpp_config()
+
+ remote_memif = VppMemif(
+ self.remote_test,
+ VppEnum.vl_api_memif_role_t.MEMIF_ROLE_API_MASTER,
+ VppEnum.vl_api_memif_mode_t.MEMIF_MODE_API_ETHERNET,
+ socket_id=1)
+
+ memif.add_vpp_config()
+ memif.config_ip4()
+ memif.admin_up()
+
+ remote_memif.add_vpp_config()
+ remote_memif.config_ip4()
+ remote_memif.admin_up()
+
+ self.assertTrue(memif.wait_for_link_up(5))
+ self.assertTrue(remote_memif.wait_for_link_up(5))
+
+ # add routing to remote vpp
+ route = VppIpRoute(self.remote_test, self.pg0._local_ip4_subnet, 24,
+ [VppRoutePath(memif.ip_prefix.network_address,
+ 0xffffffff)],
+ register=False)
+
+ route.add_vpp_config()
+
+ # create ICMP echo-request from local pg to remote memif
+ packet_num = 10
+ pkts = self._create_icmp(self.pg0, remote_memif, packet_num)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(packet_num, timeout=2)
+ seq = 0
+ for c in capture:
+ self._verify_icmp(self.pg0, remote_memif, c, seq)
+ seq += 1
+
+ route.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_mss_clamp.py b/test/test_mss_clamp.py
new file mode 100644
index 00000000000..23495b6050b
--- /dev/null
+++ b/test/test_mss_clamp.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.layers.inet import IP, TCP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+
+
+class TestMSSClamp(VppTestCase):
+ """ TCP MSS Clamping Test Case """
+
+ def setUp(self):
+ super(TestMSSClamp, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(2))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestMSSClamp, self).tearDown()
+
+ def verify_pkt(self, rx, expected_mss):
+        # check that the MSS option value equals the expected value
+ # and the IP and TCP checksums are correct
+ tcp = rx[TCP]
+ tcp_csum = tcp.chksum
+ del tcp.chksum
+ ip_csum = 0
+ if (rx.haslayer(IP)):
+ ip_csum = rx[IP].chksum
+ del rx[IP].chksum
+
+ opt = tcp.options
+ self.assertEqual(opt[0][0], 'MSS')
+ self.assertEqual(opt[0][1], expected_mss)
+ # recalculate checksums
+ rx = rx.__class__(bytes(rx))
+ tcp = rx[TCP]
+ self.assertEqual(tcp_csum, tcp.chksum)
+ if (rx.haslayer(IP)):
+ self.assertEqual(ip_csum, rx[IP].chksum)
+
+ def send_and_verify_ip4(self, src_pg, dst_pg, mss, expected_mss):
+        # IPv4 TCP SYN packet with the requested MSS option, sent from a
+        # host on src_pg to a host on dst_pg.
+ p = (Ether(dst=src_pg.local_mac,
+ src=src_pg.remote_mac) /
+ IP(src=src_pg.remote_ip4,
+ dst=dst_pg.remote_ip4) /
+ TCP(sport=1234, dport=1234,
+ flags="S",
+ options=[('MSS', (mss)), ('EOL', None)]) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(src_pg, p * 65, dst_pg)
+
+ for rx in rxs:
+ self.verify_pkt(rx, expected_mss)
+
+ def send_and_verify_ip6(self, src_pg, dst_pg, mss, expected_mss):
+ #
+        # IPv6 TCP SYN packet with the requested MSS option, sent from a
+        # host on src_pg to a host on dst_pg.
+ #
+ p = (Ether(dst=src_pg.local_mac,
+ src=src_pg.remote_mac) /
+ IPv6(src=src_pg.remote_ip6,
+ dst=dst_pg.remote_ip6) /
+ TCP(sport=1234, dport=1234,
+ flags="S",
+ options=[('MSS', (mss)), ('EOL', None)]) /
+ Raw('\xa5' * 100))
+
+ rxs = self.send_and_expect(src_pg, p * 65, dst_pg)
+
+ for rx in rxs:
+ self.verify_pkt(rx, expected_mss)
+
+ def test_tcp_mss_clamping_ip4_tx(self):
+ """ IP4 TCP MSS Clamping TX """
+
+ # enable the TCP MSS clamping feature to lower the MSS to 1424.
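+        # (direction values as exercised in this test: 1 = RX only,
+        # 2 = TX only, 3 = both directions, 0 = feature disabled)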
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1424, ipv6_mss=0,
+ ipv4_direction=3, ipv6_direction=0)
+
+ # Verify that the feature is enabled.
+ rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
+ self.assertEqual(reply[0].ipv4_mss, 1424)
+ self.assertEqual(reply[0].ipv4_direction, 3)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1424)
+
+ # check the stats
+ stats = self.statistics.get_counter(
+ '/err/tcp-mss-clamping-ip4-out/clamped')
+ self.assertEqual(sum(stats), 65)
+
+ # Send syn packets with small enough MSS values and verify they are
+ # unchanged.
+ self.send_and_verify_ip4(self.pg0, self.pg1, 1400, 1400)
+
+        # enable the feature only in TX direction
+ # and change the max MSS value
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1420, ipv6_mss=0,
+ ipv4_direction=2, ipv6_direction=0)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1420)
+
+        # enable the feature only in RX direction
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1424, ipv6_mss=0,
+ ipv4_direction=1, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1460)
+
+ # disable the feature
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=0,
+ ipv4_direction=0, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip4(self.pg0, self.pg1, 1460, 1460)
+
+ def test_tcp_mss_clamping_ip4_rx(self):
+ """ IP4 TCP MSS Clamping RX """
+
+ # enable the TCP MSS clamping feature to lower the MSS to 1424.
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1424, ipv6_mss=0,
+ ipv4_direction=3, ipv6_direction=0)
+
+ # Verify that the feature is enabled.
+ rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
+ self.assertEqual(reply[0].ipv4_mss, 1424)
+ self.assertEqual(reply[0].ipv4_direction, 3)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1424)
+
+ # check the stats
+ stats = self.statistics.get_counter(
+ '/err/tcp-mss-clamping-ip4-in/clamped')
+ self.assertEqual(sum(stats), 65)
+
+ # Send syn packets with small enough MSS values and verify they are
+ # unchanged.
+ self.send_and_verify_ip4(self.pg1, self.pg0, 1400, 1400)
+
+        # enable the feature only in RX direction
+ # and change the max MSS value
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1420, ipv6_mss=0,
+ ipv4_direction=1, ipv6_direction=0)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1420)
+
+        # enable the feature only in TX direction
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=1424, ipv6_mss=0,
+ ipv4_direction=2, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1460)
+
+ # disable the feature
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=0,
+ ipv4_direction=0, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip4(self.pg1, self.pg0, 1460, 1460)
+
+ def test_tcp_mss_clamping_ip6_tx(self):
+ """ IP6 TCP MSS Clamping TX """
+
+ # enable the TCP MSS clamping feature to lower the MSS to 1424.
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1424,
+ ipv4_direction=0, ipv6_direction=3)
+
+ # Verify that the feature is enabled.
+ rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
+ self.assertEqual(reply[0].ipv6_mss, 1424)
+ self.assertEqual(reply[0].ipv6_direction, 3)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1424)
+
+ # check the stats
+ stats = self.statistics.get_counter(
+ '/err/tcp-mss-clamping-ip6-out/clamped')
+ self.assertEqual(sum(stats), 65)
+
+ # Send syn packets with small enough MSS values and verify they are
+ # unchanged.
+ self.send_and_verify_ip6(self.pg0, self.pg1, 1400, 1400)
+
+        # enable the feature only in TX direction
+ # and change the max MSS value
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1420,
+ ipv4_direction=0, ipv6_direction=2)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1420)
+
+        # enable the feature only in RX direction
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1424,
+ ipv4_direction=0, ipv6_direction=1)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1460)
+
+ # disable the feature
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=0,
+ ipv4_direction=0, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip6(self.pg0, self.pg1, 1460, 1460)
+
+ def test_tcp_mss_clamping_ip6_rx(self):
+ """ IP6 TCP MSS Clamping RX """
+
+ # enable the TCP MSS clamping feature to lower the MSS to 1424.
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1424,
+ ipv4_direction=0, ipv6_direction=3)
+
+ # Verify that the feature is enabled.
+ rv, reply = self.vapi.mss_clamp_get(sw_if_index=self.pg1.sw_if_index)
+ self.assertEqual(reply[0].ipv6_mss, 1424)
+ self.assertEqual(reply[0].ipv6_direction, 3)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1424)
+
+ # check the stats
+ stats = self.statistics.get_counter(
+ '/err/tcp-mss-clamping-ip6-in/clamped')
+ self.assertEqual(sum(stats), 65)
+
+ # Send syn packets with small enough MSS values and verify they are
+ # unchanged.
+ self.send_and_verify_ip6(self.pg1, self.pg0, 1400, 1400)
+
+        # enable the feature only in RX direction
+ # and change the max MSS value
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1420,
+ ipv4_direction=0, ipv6_direction=1)
+
+ # Send syn packets and verify that the MSS value is lowered.
+ self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1420)
+
+        # enable the feature only in TX direction
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=1424,
+ ipv4_direction=0, ipv6_direction=2)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1460)
+
+ # disable the feature
+ self.vapi.mss_clamp_enable_disable(self.pg1.sw_if_index,
+ ipv4_mss=0, ipv6_mss=0,
+ ipv4_direction=0, ipv6_direction=0)
+
+ # Send the packets again and ensure they are unchanged.
+ self.send_and_verify_ip6(self.pg1, self.pg0, 1460, 1460)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_nat44_ed.py b/test/test_nat44_ed.py
new file mode 100644
index 00000000000..2ce7f23dac9
--- /dev/null
+++ b/test/test_nat44_ed.py
@@ -0,0 +1,3662 @@
+#!/usr/bin/env python3
+
+import unittest
+from io import BytesIO
+from random import randint, shuffle, choice
+
+import scapy.compat
+from framework import VppTestCase, VppTestRunner
+from scapy.data import IP_PROTOS
+from scapy.layers.inet import IP, TCP, UDP, ICMP, GRE
+from scapy.layers.inet import IPerror, TCPerror
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from syslog_rfc5424_parser import SyslogMessage, ParseError
+from syslog_rfc5424_parser.constants import SyslogSeverity
+from util import ppp, ip4_range
+from vpp_acl import AclRule, VppAcl, VppAclInterface
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_papi import VppEnum
+
+
+class NAT44EDTestCase(VppTestCase):
+
+ nat_addr = '10.0.0.3'
+
+ tcp_port_in = 6303
+ tcp_port_out = 6303
+
+ udp_port_in = 6304
+ udp_port_out = 6304
+
+ icmp_id_in = 6305
+ icmp_id_out = 6305
+
+ tcp_external_port = 80
+
+ max_sessions = 100
+
+ def setUp(self):
+ super(NAT44EDTestCase, self).setUp()
+ self.plugin_enable()
+
+ def tearDown(self):
+ super(NAT44EDTestCase, self).tearDown()
+ if not self.vpp_dead:
+ self.plugin_disable()
+
+ def plugin_enable(self):
+ self.vapi.nat44_ed_plugin_enable_disable(
+ sessions=self.max_sessions, enable=1)
+
+ def plugin_disable(self):
+ self.vapi.nat44_ed_plugin_enable_disable(enable=0)
+
+ @property
+ def config_flags(self):
+ return VppEnum.vl_api_nat_config_flags_t
+
+ @property
+ def nat44_config_flags(self):
+ return VppEnum.vl_api_nat44_config_flags_t
+
+ @property
+ def syslog_severity(self):
+ return VppEnum.vl_api_syslog_severity_t
+
+ @property
+ def server_addr(self):
+ return self.pg1.remote_hosts[0].ip4
+
+ @staticmethod
+ def random_port():
+ return randint(1025, 65535)
+
+ @staticmethod
+ def proto2layer(proto):
+ if proto == IP_PROTOS.tcp:
+ return TCP
+ elif proto == IP_PROTOS.udp:
+ return UDP
+ elif proto == IP_PROTOS.icmp:
+ return ICMP
+ else:
+ raise Exception("Unsupported protocol")
+
+ @classmethod
+ def create_and_add_ip4_table(cls, i, table_id=0):
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': table_id})
+ i.set_table_ip4(table_id)
+
+ @classmethod
+ def configure_ip4_interface(cls, i, hosts=0, table_id=None):
+ if table_id:
+ cls.create_and_add_ip4_table(i, table_id)
+
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ if hosts:
+ i.generate_remote_hosts(hosts)
+ i.configure_ipv4_neighbors()
+
+ @classmethod
+ def nat_add_interface_address(cls, i):
+ cls.vapi.nat44_add_del_interface_addr(
+ sw_if_index=i.sw_if_index, is_add=1)
+
+ def nat_add_inside_interface(self, i):
+ self.vapi.nat44_interface_add_del_feature(
+ flags=self.config_flags.NAT_IS_INSIDE,
+ sw_if_index=i.sw_if_index, is_add=1)
+
+ def nat_add_outside_interface(self, i):
+ self.vapi.nat44_interface_add_del_feature(
+ flags=self.config_flags.NAT_IS_OUTSIDE,
+ sw_if_index=i.sw_if_index, is_add=1)
+
+ def nat_add_address(self, address, twice_nat=0,
+ vrf_id=0xFFFFFFFF, is_add=1):
+ flags = self.config_flags.NAT_IS_TWICE_NAT if twice_nat else 0
+ self.vapi.nat44_add_del_address_range(first_ip_address=address,
+ last_ip_address=address,
+ vrf_id=vrf_id,
+ is_add=is_add,
+ flags=flags)
+
+ def nat_add_static_mapping(self, local_ip, external_ip='0.0.0.0',
+ local_port=0, external_port=0, vrf_id=0,
+ is_add=1, external_sw_if_index=0xFFFFFFFF,
+ proto=0, tag="", flags=0):
+
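+        # without local/external ports this installs an address-only (1:1)
+        # static mapping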
+ if not (local_port and external_port):
+ flags |= self.config_flags.NAT_IS_ADDR_ONLY
+
+ self.vapi.nat44_add_del_static_mapping(
+ is_add=is_add,
+ local_ip_address=local_ip,
+ external_ip_address=external_ip,
+ external_sw_if_index=external_sw_if_index,
+ local_port=local_port,
+ external_port=external_port,
+ vrf_id=vrf_id, protocol=proto,
+ flags=flags,
+ tag=tag)
+
+ @classmethod
+ def setUpClass(cls):
+ super(NAT44EDTestCase, cls).setUpClass()
+
+ cls.create_pg_interfaces(range(12))
+ cls.interfaces = list(cls.pg_interfaces[:4])
+
+ cls.create_and_add_ip4_table(cls.pg2, 10)
+
+ for i in cls.interfaces:
+ cls.configure_ip4_interface(i, hosts=3)
+
+ # test specific (test-multiple-vrf)
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 1})
+
+ # test specific (test-one-armed-nat44-static)
+ cls.pg4.generate_remote_hosts(2)
+ cls.pg4.config_ip4()
+ cls.vapi.sw_interface_add_del_address(
+ sw_if_index=cls.pg4.sw_if_index,
+ prefix="10.0.0.1/24")
+ cls.pg4.admin_up()
+ cls.pg4.resolve_arp()
+ cls.pg4._remote_hosts[1]._ip4 = cls.pg4._remote_hosts[0]._ip4
+ cls.pg4.resolve_arp()
+
+ # test specific interface (pg5)
+ cls.pg5._local_ip4 = "10.1.1.1"
+ cls.pg5._remote_hosts[0]._ip4 = "10.1.1.2"
+ cls.pg5.set_table_ip4(1)
+ cls.pg5.config_ip4()
+ cls.pg5.admin_up()
+ cls.pg5.resolve_arp()
+
+ # test specific interface (pg6)
+ cls.pg6._local_ip4 = "10.1.2.1"
+ cls.pg6._remote_hosts[0]._ip4 = "10.1.2.2"
+ cls.pg6.set_table_ip4(1)
+ cls.pg6.config_ip4()
+ cls.pg6.admin_up()
+ cls.pg6.resolve_arp()
+
+ rl = list()
+
+ rl.append(VppIpRoute(cls, "0.0.0.0", 0,
+ [VppRoutePath("0.0.0.0", 0xffffffff,
+ nh_table_id=0)],
+ register=False, table_id=1))
+ rl.append(VppIpRoute(cls, "0.0.0.0", 0,
+ [VppRoutePath(cls.pg1.local_ip4,
+ cls.pg1.sw_if_index)],
+ register=False))
+ rl.append(VppIpRoute(cls, cls.pg5.remote_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ cls.pg5.sw_if_index)],
+ register=False, table_id=1))
+ rl.append(VppIpRoute(cls, cls.pg6.remote_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ cls.pg6.sw_if_index)],
+ register=False, table_id=1))
+ rl.append(VppIpRoute(cls, cls.pg6.remote_ip4, 16,
+ [VppRoutePath("0.0.0.0", 0xffffffff,
+ nh_table_id=1)],
+ register=False, table_id=0))
+
+ for r in rl:
+ r.add_vpp_config()
+
+ def get_err_counter(self, path):
+ return self.statistics.get_err_counter(path)
+
+ def reass_hairpinning(self, server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.tcp,
+ ignore_port=False):
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+
+ # send packet from host to server
+ pkts = self.create_stream_frag(self.pg0,
+ self.nat_addr,
+ host_in_port,
+ server_out_port,
+ data,
+ proto)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ server_addr)
+ if proto != IP_PROTOS.icmp:
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, host_in_port)
+ self.assertEqual(p[layer].dport, server_in_port)
+ else:
+ if not ignore_port:
+ self.assertNotEqual(p[layer].id, host_in_port)
+ self.assertEqual(data, p[Raw].load)
+
+ def frag_out_of_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
+ ignore_port=False):
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ self.port_in = self.random_port()
+
+ for i in range(2):
+ # in2out
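+            # (fragments are sent in reverse order to exercise
+            # out-of-order reassembly)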
+ pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
+ self.port_in, 20, data, proto)
+ pkts.reverse()
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ if not dont_translate:
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ else:
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ self.pg1.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ if not dont_translate:
+ self.assertEqual(p[layer].dport, 20)
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, self.port_in)
+ else:
+ self.assertEqual(p[layer].sport, self.port_in)
+ else:
+ if not ignore_port:
+ if not dont_translate:
+ self.assertNotEqual(p[layer].id, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ if not dont_translate:
+ dst_addr = self.nat_addr
+ else:
+ dst_addr = self.pg0.remote_ip4
+ if proto != IP_PROTOS.icmp:
+ sport = 20
+ dport = p[layer].sport
+ else:
+ sport = p[layer].id
+ dport = 0
+ pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport,
+ data, proto, echo_reply=True)
+ pkts.reverse()
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.logger.info(self.vapi.cli("show trace"))
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg1.remote_ip4,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, 20)
+ self.assertEqual(p[layer].dport, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def reass_frags_and_verify(self, frags, src, dst):
+ buffer = BytesIO()
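+        # write each fragment's payload at its byte offset;
+        # the IP 'frag' field counts 8-byte units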
+ for p in frags:
+ self.assertEqual(p[IP].src, src)
+ self.assertEqual(p[IP].dst, dst)
+ self.assert_ip_checksum_valid(p)
+ buffer.seek(p[IP].frag * 8)
+ buffer.write(bytes(p[IP].payload))
+ ip = IP(src=frags[0][IP].src, dst=frags[0][IP].dst,
+ proto=frags[0][IP].proto)
+ if ip.proto == IP_PROTOS.tcp:
+ p = (ip / TCP(buffer.getvalue()))
+ self.logger.debug(ppp("Reassembled:", p))
+ self.assert_tcp_checksum_valid(p)
+ elif ip.proto == IP_PROTOS.udp:
+ p = (ip / UDP(buffer.getvalue()[:8]) /
+ Raw(buffer.getvalue()[8:]))
+ elif ip.proto == IP_PROTOS.icmp:
+ p = (ip / ICMP(buffer.getvalue()))
+ return p
+
+ def frag_in_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
+ ignore_port=False):
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ self.port_in = self.random_port()
+
+ # in2out
+ pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
+ self.port_in, 20, data, proto)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ if not dont_translate:
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ else:
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ self.pg1.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ if not dont_translate:
+ self.assertEqual(p[layer].dport, 20)
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, self.port_in)
+ else:
+ self.assertEqual(p[layer].sport, self.port_in)
+ else:
+ if not ignore_port:
+ if not dont_translate:
+ self.assertNotEqual(p[layer].id, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ if not dont_translate:
+ dst_addr = self.nat_addr
+ else:
+ dst_addr = self.pg0.remote_ip4
+ if proto != IP_PROTOS.icmp:
+ sport = 20
+ dport = p[layer].sport
+ else:
+ sport = p[layer].id
+ dport = 0
+ pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport, data,
+ proto, echo_reply=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg1.remote_ip4,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, 20)
+ self.assertEqual(p[layer].dport, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def verify_capture_out(self, capture, nat_ip=None, same_port=False,
+ dst_ip=None, ignore_port=False):
+ if nat_ip is None:
+ nat_ip = self.nat_addr
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP].src, nat_ip)
+ if dst_ip is not None:
+ self.assertEqual(packet[IP].dst, dst_ip)
+ if packet.haslayer(TCP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[UDP].sport, self.udp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[UDP].sport, self.udp_port_in)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[ICMP].id, self.icmp_id_in)
+ else:
+ self.assertNotEqual(
+ packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def verify_capture_in(self, capture, in_if):
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP].dst, in_if.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def create_stream_in(self, in_if, out_if, dst_ip=None, ttl=64):
+ if dst_ip is None:
+ dst_ip = out_if.remote_ip4
+
+ pkts = []
+ # TCP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ pkts.extend([p, p])
+
+ # UDP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(sport=self.udp_port_in, dport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_out(self, out_if, dst_ip=None, ttl=64,
+ use_inside_ports=False):
+ if dst_ip is None:
+ dst_ip = self.nat_addr
+ if not use_inside_ports:
+ tcp_port = self.tcp_port_out
+ udp_port = self.udp_port_out
+ icmp_id = self.icmp_id_out
+ else:
+ tcp_port = self.tcp_port_in
+ udp_port = self.udp_port_in
+ icmp_id = self.icmp_id_in
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(dport=tcp_port, sport=20))
+ pkts.extend([p, p])
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(dport=udp_port, sport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=icmp_id, type='echo-reply'))
+ pkts.append(p)
+
+ return pkts
+
+ def create_tcp_stream(self, in_if, out_if, count):
+ pkts = []
+ port = 6303
+
+ for i in range(count):
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4, ttl=64) /
+ TCP(sport=port + i, dport=20))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_frag(self, src_if, dst, sport, dport, data,
+ proto=IP_PROTOS.tcp, echo_reply=False):
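+        # build three fragments: the first carries the L4 header (for TCP the
+        # checksum is pre-computed over the whole payload below), the other
+        # two carry only raw data at 8-byte aligned fragment offsets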
+ if proto == IP_PROTOS.tcp:
+ p = (IP(src=src_if.remote_ip4, dst=dst) /
+ TCP(sport=sport, dport=dport) /
+ Raw(data))
+ p = p.__class__(scapy.compat.raw(p))
+ chksum = p[TCP].chksum
+ proto_header = TCP(sport=sport, dport=dport, chksum=chksum)
+ elif proto == IP_PROTOS.udp:
+ proto_header = UDP(sport=sport, dport=dport)
+ elif proto == IP_PROTOS.icmp:
+ if not echo_reply:
+ proto_header = ICMP(id=sport, type='echo-request')
+ else:
+ proto_header = ICMP(id=sport, type='echo-reply')
+ else:
+ raise Exception("Unsupported protocol")
+ id = self.random_port()
+ pkts = []
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[0:4])
+ else:
+ raw = Raw(data[0:16])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=0, id=id) /
+ proto_header /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[4:20])
+ else:
+ raw = Raw(data[16:32])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=3, id=id,
+ proto=proto) /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[20:])
+ else:
+ raw = Raw(data[32:])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, frag=5, proto=proto,
+ id=id) /
+ raw)
+ pkts.append(p)
+ return pkts
+
+ def frag_in_order_in_plus_out(self, in_addr, out_addr, in_port, out_port,
+ proto=IP_PROTOS.tcp):
+
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ port_in = self.random_port()
+
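+        # run the exchange twice: fragmented out2in traffic towards the
+        # server's outside address, then the fragmented reply sent in2out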
+ for i in range(2):
+ # out2in
+ pkts = self.create_stream_frag(self.pg0, out_addr,
+ port_in, out_port,
+ data, proto)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ in_addr)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, port_in)
+ self.assertEqual(p[layer].dport, in_port)
+ else:
+ self.assertEqual(p[layer].id, port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # in2out
+ if proto != IP_PROTOS.icmp:
+ pkts = self.create_stream_frag(self.pg1, self.pg0.remote_ip4,
+ in_port,
+ p[layer].sport, data, proto)
+ else:
+ pkts = self.create_stream_frag(self.pg1, self.pg0.remote_ip4,
+ p[layer].id, 0, data, proto,
+ echo_reply=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ out_addr,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, out_port)
+ self.assertEqual(p[layer].dport, port_in)
+ else:
+ self.assertEqual(p[layer].id, port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def frag_out_of_order_in_plus_out(self, in_addr, out_addr, in_port,
+ out_port, proto=IP_PROTOS.tcp):
+
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ port_in = self.random_port()
+
+ for i in range(2):
+ # out2in
+ pkts = self.create_stream_frag(self.pg0, out_addr,
+ port_in, out_port,
+ data, proto)
+ pkts.reverse()
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ in_addr)
+ if proto != IP_PROTOS.icmp:
+                self.assertEqual(p[layer].sport, port_in)
+                self.assertEqual(p[layer].dport, in_port)
+ else:
+ self.assertEqual(p[layer].id, port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # in2out
+ if proto != IP_PROTOS.icmp:
+ pkts = self.create_stream_frag(self.pg1, self.pg0.remote_ip4,
+ in_port,
+ p[layer].sport, data, proto)
+ else:
+ pkts = self.create_stream_frag(self.pg1, self.pg0.remote_ip4,
+ p[layer].id, 0, data, proto,
+ echo_reply=True)
+ pkts.reverse()
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ out_addr,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, out_port)
+ self.assertEqual(p[layer].dport, port_in)
+ else:
+ self.assertEqual(p[layer].id, port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def init_tcp_session(self, in_if, out_if, in_port, ext_port):
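+        # complete a TCP three-way handshake through the NAT and return the
+        # translated (outside) source port allocated for the session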
+ # SYN packet in->out
+ p = (Ether(src=in_if.remote_mac, dst=in_if.local_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port, flags="S"))
+ in_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = out_if.get_capture(1)
+ p = capture[0]
+ out_port = p[TCP].sport
+
+ # SYN + ACK packet out->in
+ p = (Ether(src=out_if.remote_mac, dst=out_if.local_mac) /
+ IP(src=out_if.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port, flags="SA"))
+ out_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ in_if.get_capture(1)
+
+ # ACK packet in->out
+ p = (Ether(src=in_if.remote_mac, dst=in_if.local_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port, flags="A"))
+ in_if.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ out_if.get_capture(1)
+
+ return out_port
+
+ def twice_nat_common(self, self_twice_nat=False, same_pg=False, lb=False,
+ client_id=None):
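+        # twice NAT: the out2in packet has both its destination (local
+        # server) and its source (external host) rewritten; whether the
+        # source is rewritten in the self-twice-NAT variants is captured by
+        # eh_translate below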
+ twice_nat_addr = '10.0.1.3'
+
+ port_in = 8080
+ if lb:
+ if not same_pg:
+ port_in1 = port_in
+ port_in2 = port_in
+ else:
+ port_in1 = port_in + 1
+ port_in2 = port_in + 2
+
+ port_out = 80
+ eh_port_out = 4567
+
+ server1 = self.pg0.remote_hosts[0]
+ server2 = self.pg0.remote_hosts[1]
+ if lb and same_pg:
+ server2 = server1
+ if not lb:
+ server = server1
+
+ pg0 = self.pg0
+ if same_pg:
+ pg1 = self.pg0
+ else:
+ pg1 = self.pg1
+
+ eh_translate = ((not self_twice_nat) or (not lb and same_pg) or
+ client_id == 1)
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_address(twice_nat_addr, twice_nat=1)
+
+ flags = 0
+ if self_twice_nat:
+ flags |= self.config_flags.NAT_IS_SELF_TWICE_NAT
+ else:
+ flags |= self.config_flags.NAT_IS_TWICE_NAT
+
+ if not lb:
+ self.nat_add_static_mapping(pg0.remote_ip4, self.nat_addr,
+ port_in, port_out,
+ proto=IP_PROTOS.tcp,
+ flags=flags)
+ else:
+ locals = [{'addr': server1.ip4,
+ 'port': port_in1,
+ 'probability': 50,
+ 'vrf_id': 0},
+ {'addr': server2.ip4,
+ 'port': port_in2,
+ 'probability': 50,
+ 'vrf_id': 0}]
+ out_addr = self.nat_addr
+
+ self.vapi.nat44_add_del_lb_static_mapping(is_add=1, flags=flags,
+ external_addr=out_addr,
+ external_port=port_out,
+ protocol=IP_PROTOS.tcp,
+ local_num=len(locals),
+ locals=locals)
+ self.nat_add_inside_interface(pg0)
+ self.nat_add_outside_interface(pg1)
+
+ if same_pg:
+ if not lb:
+ client = server
+ else:
+ assert client_id is not None
+ if client_id == 1:
+ client = self.pg0.remote_hosts[0]
+ elif client_id == 2:
+ client = self.pg0.remote_hosts[1]
+ else:
+ client = pg1.remote_hosts[0]
+ p = (Ether(src=pg1.remote_mac, dst=pg1.local_mac) /
+ IP(src=client.ip4, dst=self.nat_addr) /
+ TCP(sport=eh_port_out, dport=port_out))
+ pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ if lb:
+ if ip.dst == server1.ip4:
+ server = server1
+ port_in = port_in1
+ else:
+ server = server2
+ port_in = port_in2
+ self.assertEqual(ip.dst, server.ip4)
+ if lb and same_pg:
+ self.assertIn(tcp.dport, [port_in1, port_in2])
+ else:
+ self.assertEqual(tcp.dport, port_in)
+ if eh_translate:
+ self.assertEqual(ip.src, twice_nat_addr)
+ self.assertNotEqual(tcp.sport, eh_port_out)
+ else:
+ self.assertEqual(ip.src, client.ip4)
+ self.assertEqual(tcp.sport, eh_port_out)
+ eh_addr_in = ip.src
+ eh_port_in = tcp.sport
+ saved_port_in = tcp.dport
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ p = (Ether(src=server.mac, dst=pg0.local_mac) /
+ IP(src=server.ip4, dst=eh_addr_in) /
+ TCP(sport=saved_port_in, dport=eh_port_in))
+ pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, client.ip4)
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.dport, eh_port_out)
+ self.assertEqual(tcp.sport, port_out)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ if eh_translate:
+ sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)
+ self.assertEqual(len(sessions), 1)
+ self.assertTrue(sessions[0].flags &
+ self.config_flags.NAT_IS_EXT_HOST_VALID)
+ self.assertTrue(sessions[0].flags &
+ self.config_flags.NAT_IS_TWICE_NAT)
+ self.logger.info(self.vapi.cli("show nat44 sessions"))
+ self.vapi.nat44_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=(self.config_flags.NAT_IS_INSIDE |
+ self.config_flags.NAT_IS_EXT_HOST_VALID),
+ ext_host_address=sessions[0].ext_host_nat_address,
+ ext_host_port=sessions[0].ext_host_nat_port)
+ sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)
+ self.assertEqual(len(sessions), 0)
+
+ def verify_syslog_sess(self, data, is_add=True, is_ip6=False):
+ message = data.decode('utf-8')
+ try:
+ message = SyslogMessage.parse(message)
+ except ParseError as e:
+ self.logger.error(e)
+ raise
+ else:
+ self.assertEqual(message.severity, SyslogSeverity.info)
+ self.assertEqual(message.appname, 'NAT')
+ self.assertEqual(message.msgid, 'SADD' if is_add else 'SDEL')
+ sd_params = message.sd.get('nsess')
+ self.assertTrue(sd_params is not None)
+ if is_ip6:
+ self.assertEqual(sd_params.get('IATYP'), 'IPv6')
+ self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip6)
+ else:
+ self.assertEqual(sd_params.get('IATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip4)
+ self.assertTrue(sd_params.get('SSUBIX') is not None)
+ self.assertEqual(sd_params.get('ISPORT'), "%d" % self.tcp_port_in)
+ self.assertEqual(sd_params.get('XATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('XSADDR'), self.nat_addr)
+ self.assertEqual(sd_params.get('XSPORT'), "%d" % self.tcp_port_out)
+ self.assertEqual(sd_params.get('PROTO'), "%d" % IP_PROTOS.tcp)
+ self.assertEqual(sd_params.get('SVLAN'), '0')
+ self.assertEqual(sd_params.get('XDADDR'), self.pg1.remote_ip4)
+ self.assertEqual(sd_params.get('XDPORT'),
+ "%d" % self.tcp_external_port)
+
+
+class TestNAT44ED(NAT44EDTestCase):
+ """ NAT44ED Test Case """
+
+ def test_users_dump(self):
+ """ NAT44ED API test - nat44_user_dump """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+
+ local_ip = self.pg0.remote_ip4
+ external_ip = self.nat_addr
+ self.nat_add_static_mapping(local_ip, external_ip)
+
+ users = self.vapi.nat44_user_dump()
+ self.assertEqual(len(users), 0)
+
+ # in2out - static mapping match
+
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, same_port=True)
+
+ users = self.vapi.nat44_user_dump()
+ self.assertEqual(len(users), 1)
+ static_user = users[0]
+ self.assertEqual(static_user.nstaticsessions, 3)
+ self.assertEqual(static_user.nsessions, 0)
+
+ # in2out - no static mapping match (forwarding test)
+
+ host0 = self.pg0.remote_hosts[0]
+ self.pg0.remote_hosts[0] = self.pg0.remote_hosts[1]
+ try:
+ pkts = self.create_stream_out(self.pg1,
+ dst_ip=self.pg0.remote_ip4,
+ use_inside_ports=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4,
+ same_port=True)
+ finally:
+ self.pg0.remote_hosts[0] = host0
+
+ users = self.vapi.nat44_user_dump()
+ self.assertEqual(len(users), 2)
+ if str(users[0].ip_address) == self.pg0.remote_hosts[0].ip4:
+ non_static_user = users[1]
+ static_user = users[0]
+ else:
+ non_static_user = users[0]
+ static_user = users[1]
+ self.assertEqual(static_user.nstaticsessions, 3)
+ self.assertEqual(static_user.nsessions, 0)
+ self.assertEqual(non_static_user.nstaticsessions, 0)
+ self.assertEqual(non_static_user.nsessions, 3)
+
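+        # dump the users again - the reported counts must not change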
+ users = self.vapi.nat44_user_dump()
+ self.assertEqual(len(users), 2)
+ if str(users[0].ip_address) == self.pg0.remote_hosts[0].ip4:
+ non_static_user = users[1]
+ static_user = users[0]
+ else:
+ non_static_user = users[0]
+ static_user = users[1]
+ self.assertEqual(static_user.nstaticsessions, 3)
+ self.assertEqual(static_user.nsessions, 0)
+ self.assertEqual(non_static_user.nstaticsessions, 0)
+ self.assertEqual(non_static_user.nsessions, 3)
+
+ def test_frag_out_of_order_do_not_translate(self):
+ """ NAT44ED don't translate fragments arriving out of order """
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.vapi.nat44_forwarding_enable_disable(enable=True)
+ self.frag_out_of_order(proto=IP_PROTOS.tcp, dont_translate=True)
+
+ def test_forwarding(self):
+ """ NAT44ED forwarding test """
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+
+ real_ip = self.pg0.remote_ip4
+ alias_ip = self.nat_addr
+ flags = self.config_flags.NAT_IS_ADDR_ONLY
+ self.vapi.nat44_add_del_static_mapping(is_add=1,
+ local_ip_address=real_ip,
+ external_ip_address=alias_ip,
+ external_sw_if_index=0xFFFFFFFF,
+ flags=flags)
+
+ try:
+ # in2out - static mapping match
+
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, same_port=True)
+
+ # in2out - no static mapping match
+
+ host0 = self.pg0.remote_hosts[0]
+ self.pg0.remote_hosts[0] = self.pg0.remote_hosts[1]
+ try:
+ pkts = self.create_stream_out(self.pg1,
+ dst_ip=self.pg0.remote_ip4,
+ use_inside_ports=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4,
+ same_port=True)
+ finally:
+ self.pg0.remote_hosts[0] = host0
+
+ user = self.pg0.remote_hosts[1]
+ sessions = self.vapi.nat44_user_session_dump(user.ip4, 0)
+ self.assertEqual(len(sessions), 3)
+ self.assertTrue(sessions[0].flags &
+ self.config_flags.NAT_IS_EXT_HOST_VALID)
+ self.vapi.nat44_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=(self.config_flags.NAT_IS_INSIDE |
+ self.config_flags.NAT_IS_EXT_HOST_VALID),
+ ext_host_address=sessions[0].ext_host_address,
+ ext_host_port=sessions[0].ext_host_port)
+ sessions = self.vapi.nat44_user_session_dump(user.ip4, 0)
+ self.assertEqual(len(sessions), 2)
+
+ finally:
+ self.vapi.nat44_forwarding_enable_disable(enable=0)
+ flags = self.config_flags.NAT_IS_ADDR_ONLY
+ self.vapi.nat44_add_del_static_mapping(
+ is_add=0,
+ local_ip_address=real_ip,
+ external_ip_address=alias_ip,
+ external_sw_if_index=0xFFFFFFFF,
+ flags=flags)
+
+ def test_output_feature_and_service2(self):
+ """ NAT44ED interface output feature and service host direct access """
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr)
+
+ self.vapi.nat44_interface_add_del_output_feature(
+            sw_if_index=self.pg1.sw_if_index, is_add=1)
+
+ # session initiated from service host - translate
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # session initiated from remote host - do not translate
+ tcp_port_in = self.tcp_port_in
+ udp_port_in = self.udp_port_in
+ icmp_id_in = self.icmp_id_in
+
+ self.tcp_port_in = 60303
+ self.udp_port_in = 60304
+ self.icmp_id_in = 60305
+
+ try:
+ pkts = self.create_stream_out(self.pg1,
+ self.pg0.remote_ip4,
+ use_inside_ports=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4,
+ same_port=True)
+ finally:
+ self.tcp_port_in = tcp_port_in
+ self.udp_port_in = udp_port_in
+ self.icmp_id_in = icmp_id_in
+
+ def test_twice_nat(self):
+ """ NAT44ED Twice NAT """
+ self.twice_nat_common()
+
+ def test_self_twice_nat_positive(self):
+ """ NAT44ED Self Twice NAT (positive test) """
+ self.twice_nat_common(self_twice_nat=True, same_pg=True)
+
+ def test_self_twice_nat_lb_positive(self):
+ """ NAT44ED Self Twice NAT local service load balancing (positive test)
+ """
+ self.twice_nat_common(lb=True, self_twice_nat=True, same_pg=True,
+ client_id=1)
+
+ def test_twice_nat_lb(self):
+ """ NAT44ED Twice NAT local service load balancing """
+ self.twice_nat_common(lb=True)
+
+ def test_output_feature(self):
+ """ NAT44ED interface output feature (in2out postrouting) """
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr)
+
+ self.nat_add_outside_interface(self.pg0)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg1.sw_if_index, is_add=1)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_with_port_out2(self):
+ """ NAT44ED 1:1 NAPT asymmetrical rule """
+
+ external_port = 80
+ local_port = 8080
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ flags = self.config_flags.NAT_IS_OUT2IN_ONLY
+ self.nat_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ local_port, external_port,
+ proto=IP_PROTOS.tcp, flags=flags)
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # from client to service
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # ICMP error
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ ICMP(type=11) / capture[0][IP])
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ self.assertEqual(p[IP].src, self.nat_addr)
+ inner = p[IPerror]
+ self.assertEqual(inner.dst, self.nat_addr)
+ self.assertEqual(inner[TCPerror].dport, external_port)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # ICMP error
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ ICMP(type=11) / capture[0][IP])
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ self.assertEqual(p[IP].dst, self.pg0.remote_ip4)
+ inner = p[IPerror]
+ self.assertEqual(inner.src, self.pg0.remote_ip4)
+ self.assertEqual(inner[TCPerror].sport, local_port)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client to server (no translation)
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4) /
+ TCP(sport=12346, dport=local_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client (no translation)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12346))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg0.remote_ip4)
+ self.assertEqual(tcp.sport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_static_lb(self):
+ """ NAT44ED local service load balancing """
+        external_addr = self.nat_addr
+ external_port = 80
+ local_port = 8080
+ server1 = self.pg0.remote_hosts[0]
+ server2 = self.pg0.remote_hosts[1]
+
+ locals = [{'addr': server1.ip4,
+ 'port': local_port,
+ 'probability': 70,
+ 'vrf_id': 0},
+ {'addr': server2.ip4,
+ 'port': local_port,
+ 'probability': 30,
+ 'vrf_id': 0}]
+
+ self.nat_add_address(self.nat_addr)
+ self.vapi.nat44_add_del_lb_static_mapping(
+ is_add=1,
+            external_addr=external_addr,
+ external_port=external_port,
+ protocol=IP_PROTOS.tcp,
+ local_num=len(locals),
+ locals=locals)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # from client to service
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ server = None
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertIn(ip.dst, [server1.ip4, server2.ip4])
+ if ip.dst == server1.ip4:
+ server = server1
+ else:
+ server = server2
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)
+ self.assertEqual(len(sessions), 1)
+ self.assertTrue(sessions[0].flags &
+ self.config_flags.NAT_IS_EXT_HOST_VALID)
+ self.vapi.nat44_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=(self.config_flags.NAT_IS_INSIDE |
+ self.config_flags.NAT_IS_EXT_HOST_VALID),
+ ext_host_address=sessions[0].ext_host_address,
+ ext_host_port=sessions[0].ext_host_port)
+ sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)
+ self.assertEqual(len(sessions), 0)
+
+ def test_static_lb_2(self):
+ """ NAT44ED local service load balancing (asymmetrical rule) """
+ external_addr = self.nat_addr
+ external_port = 80
+ local_port = 8080
+ server1 = self.pg0.remote_hosts[0]
+ server2 = self.pg0.remote_hosts[1]
+
+ locals = [{'addr': server1.ip4,
+ 'port': local_port,
+ 'probability': 70,
+ 'vrf_id': 0},
+ {'addr': server2.ip4,
+ 'port': local_port,
+ 'probability': 30,
+ 'vrf_id': 0}]
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ flags = self.config_flags.NAT_IS_OUT2IN_ONLY
+ self.vapi.nat44_add_del_lb_static_mapping(is_add=1, flags=flags,
+ external_addr=external_addr,
+ external_port=external_port,
+ protocol=IP_PROTOS.tcp,
+ local_num=len(locals),
+ locals=locals)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # from client to service
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ server = None
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertIn(ip.dst, [server1.ip4, server2.ip4])
+ if ip.dst == server1.ip4:
+ server = server1
+ else:
+ server = server2
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client to server (no translation)
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=server1.ip4) /
+ TCP(sport=12346, dport=local_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ server = None
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, server1.ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client (no translation)
+ p = (Ether(src=server1.mac, dst=self.pg0.local_mac) /
+ IP(src=server1.ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12346))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, server1.ip4)
+ self.assertEqual(tcp.sport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_lb_affinity(self):
+ """ NAT44ED local service load balancing affinity """
+ external_addr = self.nat_addr
+ external_port = 80
+ local_port = 8080
+ server1 = self.pg0.remote_hosts[0]
+ server2 = self.pg0.remote_hosts[1]
+
+ locals = [{'addr': server1.ip4,
+ 'port': local_port,
+ 'probability': 50,
+ 'vrf_id': 0},
+ {'addr': server2.ip4,
+ 'port': local_port,
+ 'probability': 50,
+ 'vrf_id': 0}]
+
+ self.nat_add_address(self.nat_addr)
+ self.vapi.nat44_add_del_lb_static_mapping(is_add=1,
+ external_addr=external_addr,
+ external_port=external_port,
+ protocol=IP_PROTOS.tcp,
+ affinity=10800,
+ local_num=len(locals),
+ locals=locals)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=1025, dport=external_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ backend = capture[0][IP].dst
+
+ sessions = self.vapi.nat44_user_session_dump(backend, 0)
+ self.assertEqual(len(sessions), 1)
+ self.assertTrue(sessions[0].flags &
+ self.config_flags.NAT_IS_EXT_HOST_VALID)
+ self.vapi.nat44_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=(self.config_flags.NAT_IS_INSIDE |
+ self.config_flags.NAT_IS_EXT_HOST_VALID),
+ ext_host_address=sessions[0].ext_host_address,
+ ext_host_port=sessions[0].ext_host_port)
+
+ pkts = []
+ for port in range(1030, 1100):
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=port, dport=external_port))
+ pkts.append(p)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for p in capture:
+ self.assertEqual(p[IP].dst, backend)
+
+ def test_multiple_vrf(self):
+ """ NAT44ED Multiple VRF setup """
+
+ external_addr = '1.2.3.4'
+ external_port = 80
+ local_port = 8080
+ port = 0
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1, flags=flags)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg5.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg5.sw_if_index,
+ is_add=1, flags=flags)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg6.sw_if_index,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_OUT2IN_ONLY
+ self.nat_add_static_mapping(self.pg5.remote_ip4, external_addr,
+ local_port, external_port, vrf_id=1,
+ proto=IP_PROTOS.tcp, flags=flags)
+ self.nat_add_static_mapping(
+ self.pg0.remote_ip4,
+ external_sw_if_index=self.pg0.sw_if_index,
+ local_port=local_port,
+ vrf_id=0,
+ external_port=external_port,
+ proto=IP_PROTOS.tcp,
+ flags=flags
+ )
+
+ # from client to service (both VRF1)
+ p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+ IP(src=self.pg6.remote_ip4, dst=external_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg6.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg5.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client (both VRF1)
+ p = (Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac) /
+ IP(src=self.pg5.remote_ip4, dst=self.pg6.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg5.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, external_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # dynamic NAT from VRF1 to VRF0 (output-feature)
+ p = (Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac) /
+ IP(src=self.pg5.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=2345, dport=22))
+ self.pg5.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assert_packet_checksums_valid(p)
+ port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=22, dport=port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg5.remote_ip4)
+ self.assertEqual(tcp.dport, 2345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client VRF1 to service VRF0
+ p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+ IP(src=self.pg6.remote_ip4, dst=self.pg0.local_ip4) /
+ TCP(sport=12346, dport=external_port))
+ self.pg6.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service VRF0 back to client VRF1
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg6.remote_ip4) /
+ TCP(sport=local_port, dport=12346))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg0.local_ip4)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client VRF0 to service VRF1
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=external_addr) /
+ TCP(sport=12347, dport=external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg5.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service VRF1 back to client VRF0
+ p = (Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac) /
+ IP(src=self.pg5.remote_ip4, dst=self.pg0.remote_ip4) /
+ TCP(sport=local_port, dport=12347))
+ self.pg5.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, external_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client to server (both VRF1, no translation)
+ p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+ IP(src=self.pg6.remote_ip4, dst=self.pg5.remote_ip4) /
+ TCP(sport=12348, dport=local_port))
+ self.pg6.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg5.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from server back to client (both VRF1, no translation)
+ p = (Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac) /
+ IP(src=self.pg5.remote_ip4, dst=self.pg6.remote_ip4) /
+ TCP(sport=local_port, dport=12348))
+ self.pg5.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg5.remote_ip4)
+ self.assertEqual(tcp.sport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client VRF1 to server VRF0 (no translation)
+        p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+             IP(src=self.pg6.remote_ip4, dst=self.pg0.remote_ip4) /
+             TCP(sport=12349, dport=local_port))
+        self.pg6.add_stream(p)
+        self.pg_enable_capture(self.pg_interfaces)
+        self.pg_start()
+        capture = self.pg0.get_capture(1)
+        p = capture[0]
+        try:
+            ip = p[IP]
+            tcp = p[TCP]
+            self.assertEqual(ip.dst, self.pg0.remote_ip4)
+            self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from server VRF0 back to client VRF1 (no translation)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg6.remote_ip4) /
+ TCP(sport=local_port, dport=12349))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg0.remote_ip4)
+ self.assertEqual(tcp.sport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from client VRF0 to server VRF1 (no translation)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg5.remote_ip4) /
+ TCP(sport=12344, dport=local_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg5.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from server VRF1 back to client VRF0 (no translation)
+ p = (Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac) /
+ IP(src=self.pg5.remote_ip4, dst=self.pg0.remote_ip4) /
+ TCP(sport=local_port, dport=12344))
+ self.pg5.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg5.remote_ip4)
+ self.assertEqual(tcp.sport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_outside_address_distribution(self):
+ """ Outside address distribution based on source address """
+
+ x = 100
+ nat_addresses = []
+
+ for i in range(1, x):
+ a = "10.0.0.%d" % i
+ nat_addresses.append(a)
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.vapi.nat44_add_del_address_range(
+ first_ip_address=nat_addresses[0],
+ last_ip_address=nat_addresses[-1],
+ vrf_id=0xFFFFFFFF, is_add=1, flags=0)
+
+ self.pg0.generate_remote_hosts(x)
+
+ pkts = []
+ for i in range(x):
+ info = self.create_packet_info(self.pg0, self.pg1)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_hosts[i].ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=7000+i, dport=8000+i) /
+ Raw(payload))
+ info.data = p
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ recvd = self.pg1.get_capture(len(pkts))
+ for p_recvd in recvd:
+ payload_info = self.payload_to_info(p_recvd[Raw])
+ packet_index = payload_info.index
+ info = self._packet_infos[packet_index]
+ self.assertTrue(info is not None)
+ self.assertEqual(packet_index, info.index)
+ p_sent = info.data
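+            # the test expects the plugin to pick the outside address from
+            # the pool based on the inside source address (modulo pool size)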
+ packed = socket.inet_aton(p_sent[IP].src)
+ numeric = struct.unpack("!L", packed)[0]
+ numeric = socket.htonl(numeric)
+ a = nat_addresses[(numeric-1) % len(nat_addresses)]
+ self.assertEqual(
+ a, p_recvd[IP].src,
+ "Invalid packet (src IP %s translated to %s, but expected %s)"
+ % (p_sent[IP].src, p_recvd[IP].src, a))
+
+
+class TestNAT44EDMW(TestNAT44ED):
+ """ NAT44ED MW Test Case """
+ vpp_worker_count = 4
+ max_sessions = 5000
+
+ @unittest.skip('MW fix required')
+ def test_users_dump(self):
+ """ NAT44ED API test - nat44_user_dump """
+
+ @unittest.skip('MW fix required')
+ def test_frag_out_of_order_do_not_translate(self):
+ """ NAT44ED don't translate fragments arriving out of order """
+
+ @unittest.skip('MW fix required')
+ def test_forwarding(self):
+ """ NAT44ED forwarding test """
+
+ @unittest.skip('MW fix required')
+ def test_twice_nat(self):
+ """ NAT44ED Twice NAT """
+
+ @unittest.skip('MW fix required')
+ def test_twice_nat_lb(self):
+ """ NAT44ED Twice NAT local service load balancing """
+
+ @unittest.skip('MW fix required')
+ def test_output_feature(self):
+ """ NAT44ED interface output feature (in2out postrouting) """
+
+ @unittest.skip('MW fix required')
+ def test_static_with_port_out2(self):
+ """ NAT44ED 1:1 NAPT asymmetrical rule """
+
+ @unittest.skip('MW fix required')
+ def test_output_feature_and_service2(self):
+ """ NAT44ED interface output feature and service host direct access """
+
+ @unittest.skip('MW fix required')
+ def test_static_lb(self):
+ """ NAT44ED local service load balancing """
+
+ @unittest.skip('MW fix required')
+ def test_static_lb_2(self):
+ """ NAT44ED local service load balancing (asymmetrical rule) """
+
+ @unittest.skip('MW fix required')
+ def test_lb_affinity(self):
+ """ NAT44ED local service load balancing affinity """
+
+ @unittest.skip('MW fix required')
+ def test_multiple_vrf(self):
+ """ NAT44ED Multiple VRF setup """
+
+ @unittest.skip('MW fix required')
+ def test_self_twice_nat_positive(self):
+ """ NAT44ED Self Twice NAT (positive test) """
+
+ @unittest.skip('MW fix required')
+ def test_self_twice_nat_lb_positive(self):
+ """ NAT44ED Self Twice NAT local service load balancing (positive test)
+ """
+
+ def test_dynamic(self):
+ """ NAT44ED dynamic translation test """
+ pkt_count = 1500
+ tcp_port_offset = 20
+ udp_port_offset = 20
+ icmp_id_offset = 20
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # in2out
+ tc1 = self.statistics['/nat44-ed/in2out/slowpath/tcp']
+ uc1 = self.statistics['/nat44-ed/in2out/slowpath/udp']
+ ic1 = self.statistics['/nat44-ed/in2out/slowpath/icmp']
+ dc1 = self.statistics['/nat44-ed/in2out/slowpath/drops']
+
+ i2o_pkts = [[] for x in range(0, self.vpp_worker_count)]
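+        # split the packets into per-worker streams keyed on the source
+        # port / ICMP id so the load is spread across all VPP workers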
+
+ for i in range(pkt_count):
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=tcp_port_offset + i, dport=20))
+ i2o_pkts[p[TCP].sport % self.vpp_worker_count].append(p)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(sport=udp_port_offset + i, dport=20))
+ i2o_pkts[p[UDP].sport % self.vpp_worker_count].append(p)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ ICMP(id=icmp_id_offset + i, type='echo-request'))
+ i2o_pkts[p[ICMP].id % self.vpp_worker_count].append(p)
+
+ for i in range(0, self.vpp_worker_count):
+ if len(i2o_pkts[i]) > 0:
+ self.pg0.add_stream(i2o_pkts[i], worker=i)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(pkt_count * 3)
+
+ if_idx = self.pg0.sw_if_index
+ tc2 = self.statistics['/nat44-ed/in2out/slowpath/tcp']
+ uc2 = self.statistics['/nat44-ed/in2out/slowpath/udp']
+ ic2 = self.statistics['/nat44-ed/in2out/slowpath/icmp']
+ dc2 = self.statistics['/nat44-ed/in2out/slowpath/drops']
+
+ self.assertEqual(
+ tc2[:, if_idx].sum() - tc1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(
+ uc2[:, if_idx].sum() - uc1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(
+ ic2[:, if_idx].sum() - ic1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(dc2[:, if_idx].sum() - dc1[:, if_idx].sum(), 0)
+
+ self.logger.info(self.vapi.cli("show trace"))
+
+ # out2in
+ tc1 = self.statistics['/nat44-ed/out2in/fastpath/tcp']
+ uc1 = self.statistics['/nat44-ed/out2in/fastpath/udp']
+ ic1 = self.statistics['/nat44-ed/out2in/fastpath/icmp']
+ dc1 = self.statistics['/nat44-ed/out2in/fastpath/drops']
+
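+        # collect the translated source ports / ICMP ids from the in2out
+        # capture so the out2in packets target existing sessions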
+ recvd_tcp_ports = set()
+ recvd_udp_ports = set()
+ recvd_icmp_ids = set()
+
+ for p in capture:
+ if TCP in p:
+ recvd_tcp_ports.add(p[TCP].sport)
+ if UDP in p:
+ recvd_udp_ports.add(p[UDP].sport)
+ if ICMP in p:
+ recvd_icmp_ids.add(p[ICMP].id)
+
+ recvd_tcp_ports = list(recvd_tcp_ports)
+ recvd_udp_ports = list(recvd_udp_ports)
+ recvd_icmp_ids = list(recvd_icmp_ids)
+
+ o2i_pkts = [[] for x in range(0, self.vpp_worker_count)]
+ for i in range(pkt_count):
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(dport=choice(recvd_tcp_ports), sport=20))
+ o2i_pkts[p[TCP].dport % self.vpp_worker_count].append(p)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ UDP(dport=choice(recvd_udp_ports), sport=20))
+ o2i_pkts[p[UDP].dport % self.vpp_worker_count].append(p)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ ICMP(id=choice(recvd_icmp_ids), type='echo-reply'))
+ o2i_pkts[p[ICMP].id % self.vpp_worker_count].append(p)
+
+ for i in range(0, self.vpp_worker_count):
+ if len(o2i_pkts[i]) > 0:
+ self.pg1.add_stream(o2i_pkts[i], worker=i)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(pkt_count * 3)
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assert_in_range(
+ packet[TCP].dport, tcp_port_offset,
+ tcp_port_offset + pkt_count, "dst TCP port")
+ elif packet.haslayer(UDP):
+ self.assert_in_range(
+ packet[UDP].dport, udp_port_offset,
+ udp_port_offset + pkt_count, "dst UDP port")
+ else:
+ self.assert_in_range(
+ packet[ICMP].id, icmp_id_offset,
+ icmp_id_offset + pkt_count, "ICMP id")
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ if_idx = self.pg1.sw_if_index
+ tc2 = self.statistics['/nat44-ed/out2in/fastpath/tcp']
+ uc2 = self.statistics['/nat44-ed/out2in/fastpath/udp']
+ ic2 = self.statistics['/nat44-ed/out2in/fastpath/icmp']
+ dc2 = self.statistics['/nat44-ed/out2in/fastpath/drops']
+
+ self.assertEqual(
+ tc2[:, if_idx].sum() - tc1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(
+ uc2[:, if_idx].sum() - uc1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(
+ ic2[:, if_idx].sum() - ic1[:, if_idx].sum(), pkt_count)
+ self.assertEqual(dc2[:, if_idx].sum() - dc1[:, if_idx].sum(), 0)
+
+ sc = self.statistics['/nat44-ed/total-sessions']
+ self.assertEqual(sc[:, 0].sum(), len(recvd_tcp_ports) +
+ len(recvd_udp_ports) + len(recvd_icmp_ids))
+
+ def test_frag_in_order(self):
+ """ NAT44ED translate fragments arriving in order """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.frag_in_order(proto=IP_PROTOS.tcp, ignore_port=True)
+ self.frag_in_order(proto=IP_PROTOS.udp, ignore_port=True)
+ self.frag_in_order(proto=IP_PROTOS.icmp, ignore_port=True)
+
+ def test_frag_in_order_do_not_translate(self):
+ """ NAT44ED don't translate fragments arriving in order """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.vapi.nat44_forwarding_enable_disable(enable=True)
+
+ self.frag_in_order(proto=IP_PROTOS.tcp, dont_translate=True)
+
+ def test_frag_out_of_order(self):
+ """ NAT44ED translate fragments arriving out of order """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.frag_out_of_order(proto=IP_PROTOS.tcp, ignore_port=True)
+ self.frag_out_of_order(proto=IP_PROTOS.udp, ignore_port=True)
+ self.frag_out_of_order(proto=IP_PROTOS.icmp, ignore_port=True)
+
+ def test_frag_in_order_in_plus_out(self):
+ """ NAT44ED in+out interface fragments in order """
+
+ in_port = self.random_port()
+ out_port = self.random_port()
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg0)
+ self.nat_add_inside_interface(self.pg1)
+ self.nat_add_outside_interface(self.pg1)
+
+ # add static mappings for server
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ proto=IP_PROTOS.tcp)
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ proto=IP_PROTOS.udp)
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ proto=IP_PROTOS.icmp)
+
+ # run tests for each protocol
+ self.frag_in_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.tcp)
+ self.frag_in_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.udp)
+ self.frag_in_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.icmp)
+
+ def test_frag_out_of_order_in_plus_out(self):
+ """ NAT44ED in+out interface fragments out of order """
+
+ in_port = self.random_port()
+ out_port = self.random_port()
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg0)
+ self.nat_add_inside_interface(self.pg1)
+ self.nat_add_outside_interface(self.pg1)
+
+ # add static mappings for server
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ proto=IP_PROTOS.tcp)
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ proto=IP_PROTOS.udp)
+ self.nat_add_static_mapping(self.server_addr,
+ self.nat_addr,
+ proto=IP_PROTOS.icmp)
+
+ # run tests for each protocol
+ self.frag_out_of_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.tcp)
+ self.frag_out_of_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.udp)
+ self.frag_out_of_order_in_plus_out(self.server_addr,
+ self.nat_addr,
+ in_port,
+ out_port,
+ IP_PROTOS.icmp)
+
+ def test_reass_hairpinning(self):
+ """ NAT44ED fragments hairpinning """
+
+ server_addr = self.pg0.remote_hosts[1].ip4
+
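+        # host and server both sit behind NAT on pg0; traffic sent to the
+        # server's outside address/port has to be hairpinned back inside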
+ host_in_port = self.random_port()
+ server_in_port = self.random_port()
+ server_out_port = self.random_port()
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # add static mapping for server
+ self.nat_add_static_mapping(server_addr, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.tcp)
+ self.nat_add_static_mapping(server_addr, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.udp)
+ self.nat_add_static_mapping(server_addr, self.nat_addr)
+
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.tcp,
+ ignore_port=True)
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.udp,
+ ignore_port=True)
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.icmp,
+ ignore_port=True)
+
+ def test_session_limit_per_vrf(self):
+        """ NAT44ED per-VRF session limit """
+
+ inside = self.pg0
+ inside_vrf10 = self.pg2
+ outside = self.pg1
+
+ limit = 5
+
+        # limit the number of sessions for vrf 10 (the vrf of inside_vrf10);
+        # pg0 stays in the default vrf and is not limited
+        # note: specifying a non-existent vrf_id makes the process core dump
+ self.vapi.nat44_set_session_limit(session_limit=limit, vrf_id=10)
+
+ self.nat_add_inside_interface(inside)
+ self.nat_add_inside_interface(inside_vrf10)
+ self.nat_add_outside_interface(outside)
+
+        # vrf independent: use the outside interface address as the NAT
+        # pool address
+ self.nat_add_interface_address(outside)
+
+        # BUG: specifying a non-existent vrf_id causes a core dump
+ # self.nat_add_address(outside.local_ip4, vrf_id=20)
+
+ stream = self.create_tcp_stream(inside_vrf10, outside, limit * 2)
+ inside_vrf10.add_stream(stream)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ capture = outside.get_capture(limit)
+
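+        # pg0 is in the default vrf with no limit configured - all of its
+        # sessions must be created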
+ stream = self.create_tcp_stream(inside, outside, limit * 2)
+ inside.add_stream(stream)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ capture = outside.get_capture(len(stream))
+
+ def test_show_max_translations(self):
+ """ NAT44ED API test - max translations per thread """
+ nat_config = self.vapi.nat_show_config_2()
+ self.assertEqual(self.max_sessions,
+ nat_config.max_translations_per_thread)
+
+ def test_lru_cleanup(self):
+ """ NAT44ED LRU cleanup algorithm """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.vapi.nat_set_timeouts(
+ udp=1, tcp_established=7440, tcp_transitory=30, icmp=1)
+
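+ # open one long-lived TCP session, then fill the session table with short-lived UDP sessions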
+ tcp_port_out = self.init_tcp_session(self.pg0, self.pg1, 2000, 80)
+ pkts = []
+ for i in range(0, self.max_sessions - 1):
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4, ttl=64) /
+ UDP(sport=7000+i, dport=80))
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(len(pkts))
+ self.sleep(1.5, "wait for timeouts")
+
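+ # the UDP sessions have expired; new ICMP sessions can only be created by LRU cleanup of the expired entries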
+ pkts = []
+ for i in range(0, self.max_sessions - 1):
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4, ttl=64) /
+ ICMP(id=8000+i, type='echo-request'))
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(len(pkts))
+
+ def test_session_rst_timeout(self):
+ """ NAT44ED session RST timeouts """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ self.vapi.nat_set_timeouts(udp=300, tcp_established=7440,
+ tcp_transitory=5, icmp=60)
+
+ self.init_tcp_session(self.pg0, self.pg1, self.tcp_port_in,
+ self.tcp_external_port)
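+ # RST from the inside moves the established session to transitory state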
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="R"))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ self.sleep(6)
+
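+ # the transitory timeout (5 s) has passed; a new SYN on different ports should create a fresh session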
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in + 1, dport=self.tcp_external_port + 1,
+ flags="S"))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ def test_dynamic_out_of_ports(self):
+ """ NAT44ED dynamic translation test: out of ports """
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # in2out and no NAT addresses added
+ err_old = self.statistics.get_err_counter(
+ '/err/nat44-ed-in2out-slowpath/out of ports')
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(0, timeout=1)
+
+ err_new = self.statistics.get_err_counter(
+ '/err/nat44-ed-in2out-slowpath/out of ports')
+
+ self.assertEqual(err_new - err_old, len(pkts))
+
+ # in2out after NAT addresses added
+ self.nat_add_address(self.nat_addr)
+
+ err_old = self.statistics.get_err_counter(
+ '/err/nat44-ed-in2out-slowpath/out of ports')
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ err_new = self.statistics.get_err_counter(
+ '/err/nat44-ed-in2out-slowpath/out of ports')
+
+ self.assertEqual(err_new, err_old)
+
+ def test_unknown_proto(self):
+ """ NAT44ED translate packet with unknown protocol """
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # in2out
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+
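+ # GRE carries no ports, so NAT creates an address-only session for the unknown protocol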
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # out2in
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, self.pg1.remote_ip4)
+ self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_hairpinning_unknown_proto(self):
+ """ NAT44ED translate packet with unknown protocol - hairpinning """
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ server_out_port = 8765
+ server_nat_ip = "10.0.0.11"
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ # add static mapping for server
+ self.nat_add_static_mapping(server.ip4, server_nat_ip)
+
+ # host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=server_nat_ip) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
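+ # GRE from host to the server's NAT address is hairpinned back out on pg0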
+ p = (Ether(dst=self.pg0.local_mac, src=host.mac) /
+ IP(src=host.ip4, dst=server_nat_ip) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, server.ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server to host
+ p = (Ether(dst=self.pg0.local_mac, src=server.mac) /
+ IP(src=server.ip4, dst=self.nat_addr) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, server_nat_ip)
+ self.assertEqual(packet[IP].dst, host.ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_output_feature_and_service(self):
+ """ NAT44ED interface output feature and services """
+ external_addr = '1.2.3.4'
+ external_port = 80
+ local_port = 8080
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr)
+ flags = self.config_flags.NAT_IS_ADDR_ONLY
+ self.vapi.nat44_add_del_identity_mapping(
+ ip_address=self.pg1.remote_ip4, sw_if_index=0xFFFFFFFF,
+ flags=flags, is_add=1)
+ flags = self.config_flags.NAT_IS_OUT2IN_ONLY
+ self.nat_add_static_mapping(self.pg0.remote_ip4, external_addr,
+ local_port, external_port,
+ proto=IP_PROTOS.tcp, flags=flags)
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg0)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg1.sw_if_index, is_add=1)
+
+ # from client to service
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=external_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, external_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from local network host to external network
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
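+ # resend the same stream; sessions now exist, so traffic is translated via established sessions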
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ # from external network back to local network host
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_output_feature_and_service3(self):
+ """ NAT44ED interface output feature and DST NAT """
+ external_addr = '1.2.3.4'
+ external_port = 80
+ local_port = 8080
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr)
+ flags = self.config_flags.NAT_IS_OUT2IN_ONLY
+ self.nat_add_static_mapping(self.pg1.remote_ip4, external_addr,
+ local_port, external_port,
+ proto=IP_PROTOS.tcp, flags=flags)
+
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg0)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg1.sw_if_index, is_add=1)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=external_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg0.remote_ip4)
+ self.assertEqual(tcp.sport, 12345)
+ self.assertEqual(ip.dst, self.pg1.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4) /
+ TCP(sport=local_port, dport=12345))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, external_addr)
+ self.assertEqual(tcp.sport, external_port)
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.dport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_self_twice_nat_lb_negative(self):
+ """ NAT44ED Self Twice NAT local service load balancing (negative test)
+ """
+ self.twice_nat_common(lb=True, self_twice_nat=True, same_pg=True,
+ client_id=2)
+
+ def test_self_twice_nat_negative(self):
+ """ NAT44ED Self Twice NAT (negative test) """
+ self.twice_nat_common(self_twice_nat=True)
+
+ def test_static_lb_multi_clients(self):
+ """ NAT44ED local service load balancing - multiple clients"""
+
+ external_addr = self.nat_addr
+ external_port = 80
+ local_port = 8080
+ server1 = self.pg0.remote_hosts[0]
+ server2 = self.pg0.remote_hosts[1]
+ server3 = self.pg0.remote_hosts[2]
+
+ locals = [{'addr': server1.ip4,
+ 'port': local_port,
+ 'probability': 90,
+ 'vrf_id': 0},
+ {'addr': server2.ip4,
+ 'port': local_port,
+ 'probability': 10,
+ 'vrf_id': 0}]
+
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ self.nat_add_address(self.nat_addr)
+ self.vapi.nat44_add_del_lb_static_mapping(is_add=1,
+ external_addr=external_addr,
+ external_port=external_port,
+ protocol=IP_PROTOS.tcp,
+ local_num=len(locals),
+ locals=locals)
+
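+ # send traffic from a range of clients and verify the probability-weighted distribution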
+ server1_n = 0
+ server2_n = 0
+ clients = ip4_range(self.pg1.remote_ip4, 10, 50)
+ pkts = []
+ for client in clients:
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=client, dst=self.nat_addr) /
+ TCP(sport=12345, dport=external_port))
+ pkts.append(p)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for p in capture:
+ if p[IP].dst == server1.ip4:
+ server1_n += 1
+ else:
+ server2_n += 1
+ self.assertGreaterEqual(server1_n, server2_n)
+
+ local = {
+ 'addr': server3.ip4,
+ 'port': local_port,
+ 'probability': 20,
+ 'vrf_id': 0
+ }
+
+ # add new back-end
+ self.vapi.nat44_lb_static_mapping_add_del_local(
+ is_add=1,
+ external_addr=external_addr,
+ external_port=external_port,
+ local=local,
+ protocol=IP_PROTOS.tcp)
+ server1_n = 0
+ server2_n = 0
+ server3_n = 0
+ clients = ip4_range(self.pg1.remote_ip4, 60, 110)
+ pkts = []
+ for client in clients:
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=client, dst=self.nat_addr) /
+ TCP(sport=12346, dport=external_port))
+ pkts.append(p)
+ self.assertGreater(len(pkts), 0)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for p in capture:
+ if p[IP].dst == server1.ip4:
+ server1_n += 1
+ elif p[IP].dst == server2.ip4:
+ server2_n += 1
+ else:
+ server3_n += 1
+ self.assertGreater(server1_n, 0)
+ self.assertGreater(server2_n, 0)
+ self.assertGreater(server3_n, 0)
+
+ local = {
+ 'addr': server2.ip4,
+ 'port': local_port,
+ 'probability': 10,
+ 'vrf_id': 0
+ }
+
+ # remove one back-end
+ self.vapi.nat44_lb_static_mapping_add_del_local(
+ is_add=0,
+ external_addr=external_addr,
+ external_port=external_port,
+ local=local,
+ protocol=IP_PROTOS.tcp)
+ server1_n = 0
+ server2_n = 0
+ server3_n = 0
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for p in capture:
+ if p[IP].dst == server1.ip4:
+ server1_n += 1
+ elif p[IP].dst == server2.ip4:
+ server2_n += 1
+ else:
+ server3_n += 1
+ self.assertGreater(server1_n, 0)
+ self.assertEqual(server2_n, 0)
+ self.assertGreater(server3_n, 0)
+
+ def test_syslog_sess(self):
+ """ NAT44ED Test syslog session creation and deletion """
+ self.vapi.syslog_set_filter(
+ self.syslog_severity.SYSLOG_API_SEVERITY_INFO)
+ self.vapi.syslog_set_sender(self.pg3.local_ip4, self.pg3.remote_ip4)
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ self.tcp_port_out = capture[0][TCP].sport
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_sess(capture[0][Raw].load)
+
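+ # removing the NAT address deletes the session and should produce a syslog delete event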
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.nat_add_address(self.nat_addr, is_add=0)
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_sess(capture[0][Raw].load, False)
+
+ def test_twice_nat_interface_addr(self):
+ """ NAT44ED Acquire twice NAT addresses from interface """
+ flags = self.config_flags.NAT_IS_TWICE_NAT
+ self.vapi.nat44_add_del_interface_addr(
+ sw_if_index=self.pg11.sw_if_index,
+ flags=flags, is_add=1)
+
+ # no address in NAT pool
+ addresses = self.vapi.nat44_address_dump()
+ self.assertEqual(0, len(addresses))
+
+ # configure interface address and check NAT address pool
+ self.pg11.config_ip4()
+ addresses = self.vapi.nat44_address_dump()
+ self.assertEqual(1, len(addresses))
+ self.assertEqual(str(addresses[0].ip_address),
+ self.pg11.local_ip4)
+ self.assertEqual(addresses[0].flags, flags)
+
+ # remove interface address and check NAT address pool
+ self.pg11.unconfig_ip4()
+ addresses = self.vapi.nat44_address_dump()
+ self.assertEqual(0, len(addresses))
+
+ def test_output_feature_stateful_acl(self):
+ """ NAT44ED output feature works with stateful ACL """
+
+ self.nat_add_address(self.nat_addr)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=self.config_flags.NAT_IS_INSIDE, is_add=1)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ flags=self.config_flags.NAT_IS_OUTSIDE, is_add=1)
+
+ # First ensure that the NAT is working sans ACL
+
+ # send packets out2in, no sessions yet so packets should drop
+ pkts_out2in = self.create_stream_out(self.pg1)
+ self.send_and_assert_no_replies(self.pg1, pkts_out2in)
+
+ # send packets into inside intf, ensure received via outside intf
+ pkts_in2out = self.create_stream_in(self.pg0, self.pg1)
+ capture = self.send_and_expect(self.pg0, pkts_in2out, self.pg1,
+ len(pkts_in2out))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ # send out2in again, with sessions created it should work now
+ pkts_out2in = self.create_stream_out(self.pg1)
+ capture = self.send_and_expect(self.pg1, pkts_out2in, self.pg0,
+ len(pkts_out2in))
+ self.verify_capture_in(capture, self.pg0)
+
+ # Create an ACL blocking everything
+ out2in_deny_rule = AclRule(is_permit=0)
+ out2in_acl = VppAcl(self, rules=[out2in_deny_rule])
+ out2in_acl.add_vpp_config()
+
+ # create an ACL to permit/reflect everything
+ in2out_reflect_rule = AclRule(is_permit=2)
+ in2out_acl = VppAcl(self, rules=[in2out_reflect_rule])
+ in2out_acl.add_vpp_config()
+
+ # apply as input acl on interface and confirm it blocks everything
+ acl_if = VppAclInterface(self, sw_if_index=self.pg1.sw_if_index,
+ n_input=1, acls=[out2in_acl])
+ acl_if.add_vpp_config()
+ self.send_and_assert_no_replies(self.pg1, pkts_out2in)
+
+ # apply output acl
+ acl_if.acls = [out2in_acl, in2out_acl]
+ acl_if.add_vpp_config()
+ # send in2out to generate ACL state (NAT state was created earlier)
+ capture = self.send_and_expect(self.pg0, pkts_in2out, self.pg1,
+ len(pkts_in2out))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ # send out2in again. ACL state exists so it should work now.
+ # TCP packets with the syn flag set also need the ack flag
+ for p in pkts_out2in:
+ if p.haslayer(TCP) and p[TCP].flags & 0x02:
+ p[TCP].flags |= 0x10
+ capture = self.send_and_expect(self.pg1, pkts_out2in, self.pg0,
+ len(pkts_out2in))
+ self.verify_capture_in(capture, self.pg0)
+ self.logger.info(self.vapi.cli("show trace"))
+
+ def test_tcp_close(self):
+ """ NAT44ED Close TCP session from inside network - output feature """
+ old_timeouts = self.vapi.nat_get_timeouts()
+ new_transitory = 2
+ self.vapi.nat_set_timeouts(
+ udp=old_timeouts.udp,
+ tcp_established=old_timeouts.tcp_established,
+ icmp=old_timeouts.icmp,
+ tcp_transitory=new_transitory)
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.pg1.local_ip4)
+ twice_nat_addr = '10.0.1.3'
+ service_ip = '192.168.16.150'
+ self.nat_add_address(twice_nat_addr, twice_nat=1)
+
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_output_feature(
+ is_add=1,
+ sw_if_index=self.pg1.sw_if_index)
+
+ flags = (self.config_flags.NAT_IS_OUT2IN_ONLY |
+ self.config_flags.NAT_IS_TWICE_NAT)
+ self.nat_add_static_mapping(self.pg0.remote_ip4,
+ service_ip, 80, 80,
+ proto=IP_PROTOS.tcp,
+ flags=flags)
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ start_sessnum = len(sessions)
+
+ # SYN packet out->in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=service_ip) /
+ TCP(sport=33898, dport=80, flags="S"))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ tcp_port = p[TCP].sport
+
+ # SYN + ACK packet in->out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=twice_nat_addr) /
+ TCP(sport=80, dport=tcp_port, flags="SA"))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # ACK packet out->in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=service_ip) /
+ TCP(sport=33898, dport=80, flags="A"))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ # FIN packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=twice_nat_addr) /
+ TCP(sport=80, dport=tcp_port, flags="FA", seq=100, ack=300))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # FIN+ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=service_ip) /
+ TCP(sport=33898, dport=80, flags="FA", seq=300, ack=101))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ # ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=twice_nat_addr) /
+ TCP(sport=80, dport=tcp_port, flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # session now in transitory timeout
+ # try SYN packet out->in - should be dropped
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=service_ip) /
+ TCP(sport=33898, dport=80, flags="S"))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.sleep(new_transitory, "wait for transitory timeout")
+ self.pg0.assert_nothing_captured(0)
+
+ # session should still exist
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - start_sessnum, 1)
+
+ # send FIN+ACK packet out -> in - will cause session to be wiped
+ # but won't create a new session
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=service_ip) /
+ TCP(sport=33898, dport=80, flags="FA", seq=300, ack=101))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - start_sessnum, 0)
+ self.pg0.assert_nothing_captured(0)
+
+ def test_tcp_session_close_in(self):
+ """ NAT44ED Close TCP session from inside network """
+
+ in_port = self.tcp_port_in
+ out_port = 10505
+ ext_port = self.tcp_external_port
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.nat_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ in_port, out_port, proto=IP_PROTOS.tcp,
+ flags=self.config_flags.NAT_IS_TWICE_NAT)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ session_n = len(sessions)
+
+ self.vapi.nat_set_timeouts(udp=300, tcp_established=7440,
+ tcp_transitory=2, icmp=5)
+
+ self.init_tcp_session(self.pg0, self.pg1, in_port, ext_port)
+
+ # FIN packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="FA", seq=100, ack=300))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ pkts = []
+
+ # ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="A", seq=300, ack=101))
+ pkts.append(p)
+
+ # FIN packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=300, ack=101))
+ pkts.append(p)
+
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(2)
+
+ # ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 1)
+
+ out2in_drops = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ in2out_drops = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+
+ # extra FIN packet out -> in - this should be dropped
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=300, ack=101))
+
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.assert_nothing_captured()
+
+ # extra ACK packet in -> out - this should be dropped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ stats = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - out2in_drops, 1)
+ stats = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - in2out_drops, 1)
+
+ self.sleep(3)
+ # extra ACK packet in -> out - this will cause session to be wiped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 0)
+
+ def test_tcp_session_close_out(self):
+ """ NAT44ED Close TCP session from outside network """
+
+ in_port = self.tcp_port_in
+ out_port = 10505
+ ext_port = self.tcp_external_port
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.nat_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ in_port, out_port, proto=IP_PROTOS.tcp,
+ flags=self.config_flags.NAT_IS_TWICE_NAT)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ session_n = len(sessions)
+
+ self.vapi.nat_set_timeouts(udp=300, tcp_established=7440,
+ tcp_transitory=2, icmp=5)
+
+ _ = self.init_tcp_session(self.pg0, self.pg1, in_port, ext_port)
+
+ # FIN packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=100, ack=300))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ # FIN+ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="FA", seq=300, ack=101))
+
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="A", seq=101, ack=301))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 1)
+
+ out2in_drops = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ in2out_drops = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+
+ # extra FIN packet out -> in - this should be dropped
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=300, ack=101))
+
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.assert_nothing_captured()
+
+ # extra ACK packet in -> out - this should be dropped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ stats = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - out2in_drops, 1)
+ stats = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - in2out_drops, 1)
+
+ self.sleep(3)
+ # extra ACK packet in -> out - this will cause session to be wiped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 0)
+
+ def test_tcp_session_close_simultaneous(self):
+ """ NAT44ED Close TCP session from inside network """
+
+ in_port = self.tcp_port_in
+ ext_port = 10505
+
+ self.nat_add_address(self.nat_addr)
+ self.nat_add_inside_interface(self.pg0)
+ self.nat_add_outside_interface(self.pg1)
+ self.nat_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ in_port, ext_port, proto=IP_PROTOS.tcp,
+ flags=self.config_flags.NAT_IS_TWICE_NAT)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ session_n = len(sessions)
+
+ self.vapi.nat_set_timeouts(udp=300, tcp_established=7440,
+ tcp_transitory=2, icmp=5)
+
+ out_port = self.init_tcp_session(self.pg0, self.pg1, in_port, ext_port)
+
+ # FIN packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="FA", seq=100, ack=300))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # FIN packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=300, ack=100))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ # ACK packet in -> out
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(1)
+
+ # ACK packet out -> in
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="A", seq=301, ack=101))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(1)
+
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 1)
+
+ out2in_drops = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ in2out_drops = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+
+ # extra FIN packet out -> in - this should be dropped
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=ext_port, dport=out_port,
+ flags="FA", seq=300, ack=101))
+
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.assert_nothing_captured()
+
+ # extra ACK packet in -> out - this should be dropped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ stats = self.get_err_counter(
+ '/err/nat44-ed-out2in/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - out2in_drops, 1)
+ stats = self.get_err_counter(
+ '/err/nat44-ed-in2out/drops due to TCP in transitory timeout')
+ self.assertEqual(stats - in2out_drops, 1)
+
+ self.sleep(3)
+ # extra ACK packet in -> out - this will cause session to be wiped
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=in_port, dport=ext_port,
+ flags="A", seq=101, ack=301))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions) - session_n, 0)
+
+ def test_dynamic_vrf(self):
+ """ NAT44ED dynamic translation test: different VRF"""
+
+ vrf_id_in = 33
+ vrf_id_out = 34
+
+ self.nat_add_address(self.nat_addr, vrf_id=vrf_id_in)
+
+ try:
+ self.configure_ip4_interface(self.pg7, table_id=vrf_id_in)
+ self.configure_ip4_interface(self.pg8, table_id=vrf_id_out)
+
+ self.nat_add_inside_interface(self.pg7)
+ self.nat_add_outside_interface(self.pg8)
+
+ # plain in2out and out2in translation, nothing VRF-specific beyond the table ids
+ pkts = self.create_stream_in(self.pg7, self.pg8)
+ self.pg7.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg8.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ pkts = self.create_stream_out(self.pg8)
+ self.pg8.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg7.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg7)
+
+ finally:
+ self.pg7.unconfig()
+ self.pg8.unconfig()
+
+ self.vapi.ip_table_add_del(is_add=0,
+ table={'table_id': vrf_id_in})
+ self.vapi.ip_table_add_del(is_add=0,
+ table={'table_id': vrf_id_out})
+
+ def test_dynamic_output_feature_vrf(self):
+ """ NAT44ED dynamic translation test: output-feature, VRF"""
+
+ # use a VRF other than the default (0)
+ new_vrf_id = 22
+
+ self.nat_add_address(self.nat_addr)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg7.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_interface_add_del_output_feature(
+ sw_if_index=self.pg8.sw_if_index,
+ is_add=1)
+
+ try:
+ self.configure_ip4_interface(self.pg7, table_id=new_vrf_id)
+ self.configure_ip4_interface(self.pg8, table_id=new_vrf_id)
+
+ # in2out
+ tcpn = self.statistics['/nat44-ed/in2out/slowpath/tcp']
+ udpn = self.statistics['/nat44-ed/in2out/slowpath/udp']
+ icmpn = self.statistics['/nat44-ed/in2out/slowpath/icmp']
+ drops = self.statistics['/nat44-ed/in2out/slowpath/drops']
+
+ pkts = self.create_stream_in(self.pg7, self.pg8)
+ self.pg7.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg8.get_capture(len(pkts))
+ self.verify_capture_out(capture, ignore_port=True)
+
+ if_idx = self.pg7.sw_if_index
+ cnt = self.statistics['/nat44-ed/in2out/slowpath/tcp']
+ self.assertEqual(cnt[:, if_idx].sum() - tcpn[:, if_idx].sum(), 2)
+ cnt = self.statistics['/nat44-ed/in2out/slowpath/udp']
+ self.assertEqual(cnt[:, if_idx].sum() - udpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ed/in2out/slowpath/icmp']
+ self.assertEqual(cnt[:, if_idx].sum() - icmpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ed/in2out/slowpath/drops']
+ self.assertEqual(cnt[:, if_idx].sum() - drops[:, if_idx].sum(), 0)
+
+ # out2in
+ tcpn = self.statistics['/nat44-ed/out2in/fastpath/tcp']
+ udpn = self.statistics['/nat44-ed/out2in/fastpath/udp']
+ icmpn = self.statistics['/nat44-ed/out2in/fastpath/icmp']
+ drops = self.statistics['/nat44-ed/out2in/fastpath/drops']
+
+ pkts = self.create_stream_out(self.pg8)
+ self.pg8.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg7.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg7)
+
+ if_idx = self.pg8.sw_if_index
+ cnt = self.statistics['/nat44-ed/out2in/fastpath/tcp']
+ self.assertEqual(cnt[:, if_idx].sum() - tcpn[:, if_idx].sum(), 2)
+ cnt = self.statistics['/nat44-ed/out2in/fastpath/udp']
+ self.assertEqual(cnt[:, if_idx].sum() - udpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ed/out2in/fastpath/icmp']
+ self.assertEqual(cnt[:, if_idx].sum() - icmpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ed/out2in/fastpath/drops']
+ self.assertEqual(cnt[:, if_idx].sum() - drops[:, if_idx].sum(), 0)
+
+ sessions = self.statistics['/nat44-ed/total-sessions']
+ self.assertEqual(sessions[:, 0].sum(), 3)
+
+ finally:
+ self.pg7.unconfig()
+ self.pg8.unconfig()
+
+ self.vapi.ip_table_add_del(is_add=0,
+ table={'table_id': new_vrf_id})
+
+ def test_next_src_nat(self):
+ """ NAT44ED On way back forward packet to nat44-in2out node. """
+
+ twice_nat_addr = '10.0.1.3'
+ external_port = 80
+ local_port = 8080
+ post_twice_nat_port = 0
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(twice_nat_addr, twice_nat=1)
+ flags = (self.config_flags.NAT_IS_OUT2IN_ONLY |
+ self.config_flags.NAT_IS_SELF_TWICE_NAT)
+ self.nat_add_static_mapping(self.pg6.remote_ip4, self.pg1.remote_ip4,
+ local_port, external_port,
+ proto=IP_PROTOS.tcp, vrf_id=1,
+ flags=flags)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg6.sw_if_index,
+ is_add=1)
+
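+ # request from client; self-twice-NAT rewrites both source and destination on the same interface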
+ p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+ IP(src=self.pg6.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=12345, dport=external_port))
+ self.pg6.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, twice_nat_addr)
+ self.assertNotEqual(tcp.sport, 12345)
+ post_twice_nat_port = tcp.sport
+ self.assertEqual(ip.dst, self.pg6.remote_ip4)
+ self.assertEqual(tcp.dport, local_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ p = (Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac) /
+ IP(src=self.pg6.remote_ip4, dst=twice_nat_addr) /
+ TCP(sport=local_port, dport=post_twice_nat_port))
+ self.pg6.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.pg1.remote_ip4)
+ self.assertEqual(tcp.sport, external_port)
+ self.assertEqual(ip.dst, self.pg6.remote_ip4)
+ self.assertEqual(tcp.dport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_one_armed_nat44_static(self):
+ """ NAT44ED One armed NAT and 1:1 NAPT asymmetrical rule """
+
+ remote_host = self.pg4.remote_hosts[0]
+ local_host = self.pg4.remote_hosts[1]
+ external_port = 80
+ local_port = 8080
+ eh_port_in = 0
+
+ self.vapi.nat44_forwarding_enable_disable(enable=1)
+ self.nat_add_address(self.nat_addr, twice_nat=1)
+ flags = (self.config_flags.NAT_IS_OUT2IN_ONLY |
+ self.config_flags.NAT_IS_TWICE_NAT)
+ self.nat_add_static_mapping(local_host.ip4, self.nat_addr,
+ local_port, external_port,
+ proto=IP_PROTOS.tcp, flags=flags)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg4.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_interface_add_del_feature(
+ sw_if_index=self.pg4.sw_if_index,
+ flags=flags, is_add=1)
+
+ # from client to service
+ p = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
+ IP(src=remote_host.ip4, dst=self.nat_addr) /
+ TCP(sport=12345, dport=external_port))
+ self.pg4.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg4.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, local_host.ip4)
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.dport, local_port)
+ self.assertNotEqual(tcp.sport, 12345)
+ eh_port_in = tcp.sport
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # from service back to client
+ p = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
+ IP(src=local_host.ip4, dst=self.nat_addr) /
+ TCP(sport=local_port, dport=eh_port_in))
+ self.pg4.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg4.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, remote_host.ip4)
+ self.assertEqual(tcp.sport, external_port)
+ self.assertEqual(tcp.dport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_nat44_ei.py b/test/test_nat44_ei.py
new file mode 100644
index 00000000000..4160ea2c344
--- /dev/null
+++ b/test/test_nat44_ei.py
@@ -0,0 +1,4280 @@
+#!/usr/bin/env python3
+
+import ipaddress
+import random
+import socket
+import struct
+import unittest
+from io import BytesIO
+from time import sleep
+
+import scapy.compat
+from framework import VppTestCase, VppTestRunner
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
+ IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
+ PacketListField
+from scapy.data import IP_PROTOS
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply
+from scapy.layers.l2 import Ether, ARP, GRE
+from scapy.packet import Raw
+from syslog_rfc5424_parser import SyslogMessage, ParseError
+from syslog_rfc5424_parser.constants import SyslogSeverity
+from util import ppp
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_neighbor import VppNeighbor
+from vpp_papi import VppEnum
+
+
+# NAT HA protocol event data
+class Event(Packet):
+ name = "Event"
+ fields_desc = [ByteEnumField("event_type", None,
+ {1: "add", 2: "del", 3: "refresh"}),
+ ByteEnumField("protocol", None,
+ {0: "other", 1: "udp", 2: "tcp", 3: "icmp"}),
+ ShortField("flags", 0),
+ IPField("in_addr", None),
+ IPField("out_addr", None),
+ ShortField("in_port", None),
+ ShortField("out_port", None),
+ IPField("eh_addr", None),
+ IPField("ehn_addr", None),
+ ShortField("eh_port", None),
+ ShortField("ehn_port", None),
+ IntField("fib_index", None),
+ IntField("total_pkts", 0),
+ LongField("total_bytes", 0)]
+
+ def extract_padding(self, s):
+ return "", s
+
+
+# NAT HA protocol header
+class HANATStateSync(Packet):
+ name = "HA NAT state sync"
+ fields_desc = [XByteField("version", 1),
+ FlagsField("flags", 0, 8, ['ACK']),
+ FieldLenField("count", None, count_of="events"),
+ IntField("sequence_number", 1),
+ IntField("thread_index", 0),
+ PacketListField("events", [], Event,
+ count_from=lambda pkt: pkt.count)]
+
+
+class MethodHolder(VppTestCase):
+ """ NAT create capture and verify method holder """
+
+ @property
+ def config_flags(self):
+ return VppEnum.vl_api_nat44_ei_config_flags_t
+
+ @property
+ def SYSLOG_SEVERITY(self):
+ return VppEnum.vl_api_syslog_severity_t
+
+ def nat44_add_static_mapping(self, local_ip, external_ip='0.0.0.0',
+ local_port=0, external_port=0, vrf_id=0,
+ is_add=1, external_sw_if_index=0xFFFFFFFF,
+ proto=0, tag="", flags=0):
+ """
+ Add/delete NAT44EI static mapping
+
+ :param local_ip: Local IP address
+ :param external_ip: External IP address
+ :param local_port: Local port number (Optional)
+ :param external_port: External port number (Optional)
+ :param vrf_id: VRF ID (Default 0)
+ :param is_add: 1 if add, 0 if delete (Default add)
+ :param external_sw_if_index: External interface instead of IP address
+ :param proto: IP protocol (Mandatory if port specified)
+ :param tag: Opaque string tag
+ :param flags: NAT configuration flags
+ """
+
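+ # example (illustrative values): a TCP 1:1 NAPT rule
+ #   self.nat44_add_static_mapping('10.0.0.3', '2.2.2.2', 8080, 80,
+ #                                 proto=IP_PROTOS.tcp)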
+ if not (local_port and external_port):
+ flags |= self.config_flags.NAT44_EI_ADDR_ONLY_MAPPING
+
+ self.vapi.nat44_ei_add_del_static_mapping(
+ is_add=is_add,
+ local_ip_address=local_ip,
+ external_ip_address=external_ip,
+ external_sw_if_index=external_sw_if_index,
+ local_port=local_port,
+ external_port=external_port,
+ vrf_id=vrf_id, protocol=proto,
+ flags=flags,
+ tag=tag)
+
+ def nat44_add_address(self, ip, is_add=1, vrf_id=0xFFFFFFFF):
+ """
+ Add/delete NAT44EI address
+
+ :param ip: IP address
+ :param is_add: 1 if add, 0 if delete (Default add)
+ """
+ self.vapi.nat44_ei_add_del_address_range(first_ip_address=ip,
+ last_ip_address=ip,
+ vrf_id=vrf_id,
+ is_add=is_add)
+
+ def create_routes_and_neigbors(self):
+ r1 = VppIpRoute(self, self.pg7.remote_ip4, 32,
+ [VppRoutePath(self.pg7.remote_ip4,
+ self.pg7.sw_if_index)])
+ r2 = VppIpRoute(self, self.pg8.remote_ip4, 32,
+ [VppRoutePath(self.pg8.remote_ip4,
+ self.pg8.sw_if_index)])
+ r1.add_vpp_config()
+ r2.add_vpp_config()
+
+ n1 = VppNeighbor(self,
+ self.pg7.sw_if_index,
+ self.pg7.remote_mac,
+ self.pg7.remote_ip4,
+ is_static=1)
+ n2 = VppNeighbor(self,
+ self.pg8.sw_if_index,
+ self.pg8.remote_mac,
+ self.pg8.remote_ip4,
+ is_static=1)
+ n1.add_vpp_config()
+ n2.add_vpp_config()
+
+ def create_stream_in(self, in_if, out_if, dst_ip=None, ttl=64):
+ """
+ Create packet stream for inside network
+
+ :param in_if: Inside interface
+ :param out_if: Outside interface
+ :param dst_ip: Destination address
+ :param ttl: TTL of generated packets
+ """
+ if dst_ip is None:
+ dst_ip = out_if.remote_ip4
+
+ pkts = []
+ # TCP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ pkts.extend([p, p])
+
+ # UDP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(sport=self.udp_port_in, dport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+
+ return pkts
+
+ def compose_ip6(self, ip4, pref, plen):
+ """
+ Compose IPv4-embedded IPv6 addresses
+
+ :param ip4: IPv4 address
+ :param pref: IPv6 prefix
+ :param plen: IPv6 prefix length
+ :returns: IPv4-embedded IPv6 addresses
+ """
+ pref_n = list(socket.inet_pton(socket.AF_INET6, pref))
+ ip4_n = list(socket.inet_pton(socket.AF_INET, ip4))
+ if plen == 32:
+ pref_n[4] = ip4_n[0]
+ pref_n[5] = ip4_n[1]
+ pref_n[6] = ip4_n[2]
+ pref_n[7] = ip4_n[3]
+ elif plen == 40:
+ pref_n[5] = ip4_n[0]
+ pref_n[6] = ip4_n[1]
+ pref_n[7] = ip4_n[2]
+ pref_n[9] = ip4_n[3]
+ elif plen == 48:
+ pref_n[6] = ip4_n[0]
+ pref_n[7] = ip4_n[1]
+ pref_n[9] = ip4_n[2]
+ pref_n[10] = ip4_n[3]
+ elif plen == 56:
+ pref_n[7] = ip4_n[0]
+ pref_n[9] = ip4_n[1]
+ pref_n[10] = ip4_n[2]
+ pref_n[11] = ip4_n[3]
+ elif plen == 64:
+ pref_n[9] = ip4_n[0]
+ pref_n[10] = ip4_n[1]
+ pref_n[11] = ip4_n[2]
+ pref_n[12] = ip4_n[3]
+ elif plen == 96:
+ pref_n[12] = ip4_n[0]
+ pref_n[13] = ip4_n[1]
+ pref_n[14] = ip4_n[2]
+ pref_n[15] = ip4_n[3]
+ packed_pref_n = b''.join([scapy.compat.chb(x) for x in pref_n])
+ return socket.inet_ntop(socket.AF_INET6, packed_pref_n)
+
+ def create_stream_out(self, out_if, dst_ip=None, ttl=64,
+ use_inside_ports=False):
+ """
+ Create packet stream for outside network
+
+ :param out_if: Outside interface
+ :param dst_ip: Destination IP address (Default use global NAT address)
+ :param ttl: TTL of generated packets
+ :param use_inside_ports: Use inside NAT ports as destination ports
+ instead of outside ports
+ """
+ if dst_ip is None:
+ dst_ip = self.nat_addr
+ if not use_inside_ports:
+ tcp_port = self.tcp_port_out
+ udp_port = self.udp_port_out
+ icmp_id = self.icmp_id_out
+ else:
+ tcp_port = self.tcp_port_in
+ udp_port = self.udp_port_in
+ icmp_id = self.icmp_id_in
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(dport=tcp_port, sport=20))
+ pkts.extend([p, p])
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(dport=udp_port, sport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=icmp_id, type='echo-reply'))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_out_ip6(self, out_if, src_ip, dst_ip, hl=64):
+ """
+ Create packet stream for outside network
+
+ :param out_if: Outside interface
+ :param src_ip: Source IPv6 address
+ :param dst_ip: Destination IPv6 address
+ :param hl: Hop limit of generated packets
+ """
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
+ TCP(dport=self.tcp_port_out, sport=20))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
+ UDP(dport=self.udp_port_out, sport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IPv6(src=src_ip, dst=dst_ip, hlim=hl) /
+ ICMPv6EchoReply(id=self.icmp_id_out))
+ pkts.append(p)
+
+ return pkts
+
+ def verify_capture_out(self, capture, nat_ip=None, same_port=False,
+ dst_ip=None, is_ip6=False, ignore_port=False):
+ """
+ Verify captured packets on outside network
+
+ :param capture: Captured packets
+ :param nat_ip: Translated IP address (Default use global NAT address)
+ :param same_port: Source port number is not translated (Default False)
+ :param dst_ip: Destination IP address (Default do not verify)
+ :param is_ip6: If L3 protocol is IPv6 (Default False)
+ :param ignore_port: Do not verify translated ports/IDs (Default False)
+ """
+ if is_ip6:
+ IP46 = IPv6
+ ICMP46 = ICMPv6EchoRequest
+ else:
+ IP46 = IP
+ ICMP46 = ICMP
+ if nat_ip is None:
+ nat_ip = self.nat_addr
+ for packet in capture:
+ try:
+ if not is_ip6:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP46].src, nat_ip)
+ if dst_ip is not None:
+ self.assertEqual(packet[IP46].dst, dst_ip)
+ if packet.haslayer(TCP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[UDP].sport, self.udp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[UDP].sport, self.udp_port_in)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[ICMP46].id, self.icmp_id_in)
+ else:
+ self.assertNotEqual(
+ packet[ICMP46].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP46].id
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def verify_capture_out_ip6(self, capture, nat_ip, same_port=False,
+ dst_ip=None):
+ """
+ Verify captured packets on outside network
+
+ :param capture: Captured packets
+ :param nat_ip: Translated IP address
+ :param same_port: Source port number is not translated (Default False)
+ :param dst_ip: Destination IP address (Default do not verify)
+ """
+ return self.verify_capture_out(capture, nat_ip, same_port, dst_ip,
+ True)
+
+ def verify_capture_in(self, capture, in_if):
+ """
+ Verify captured packets on inside network
+
+ :param capture: Captured packets
+ :param in_if: Inside interface
+ """
+ for packet in capture:
+ try:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP].dst, in_if.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def verify_capture_no_translation(self, capture, ingress_if, egress_if):
+ """
+ Verify captured packets that are not supposed to be translated
+
+ :param capture: Captured packets
+ :param ingress_if: Ingress interface
+ :param egress_if: Egress interface
+ """
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, ingress_if.remote_ip4)
+ self.assertEqual(packet[IP].dst, egress_if.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].sport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].sport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def verify_capture_out_with_icmp_errors(self, capture, src_ip=None,
+ icmp_type=11):
+ """
+ Verify captured packets with ICMP errors on outside network
+
+ :param capture: Captured packets
+ :param src_ip: Translated IP address or IP address of VPP
+ (Default use global NAT address)
+ :param icmp_type: Type of error ICMP packet
+ we are expecting (Default 11)
+ """
+ if src_ip is None:
+ src_ip = self.nat_addr
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, src_ip)
+ self.assertEqual(packet.haslayer(ICMP), 1)
+ icmp = packet[ICMP]
+ self.assertEqual(icmp.type, icmp_type)
+ self.assertTrue(icmp.haslayer(IPerror))
+ inner_ip = icmp[IPerror]
+ if inner_ip.haslayer(TCPerror):
+ self.assertEqual(inner_ip[TCPerror].dport,
+ self.tcp_port_out)
+ elif inner_ip.haslayer(UDPerror):
+ self.assertEqual(inner_ip[UDPerror].dport,
+ self.udp_port_out)
+ else:
+ self.assertEqual(inner_ip[ICMPerror].id, self.icmp_id_out)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def verify_capture_in_with_icmp_errors(self, capture, in_if, icmp_type=11):
+ """
+ Verify captured packets with ICMP errors on inside network
+
+ :param capture: Captured packets
+ :param in_if: Inside interface
+ :param icmp_type: Type of error ICMP packet
+ we are expecting (Default 11)
+ """
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].dst, in_if.remote_ip4)
+ self.assertEqual(packet.haslayer(ICMP), 1)
+ icmp = packet[ICMP]
+ self.assertEqual(icmp.type, icmp_type)
+ self.assertTrue(icmp.haslayer(IPerror))
+ inner_ip = icmp[IPerror]
+ if inner_ip.haslayer(TCPerror):
+ self.assertEqual(inner_ip[TCPerror].sport,
+ self.tcp_port_in)
+ elif inner_ip.haslayer(UDPerror):
+ self.assertEqual(inner_ip[UDPerror].sport,
+ self.udp_port_in)
+ else:
+ self.assertEqual(inner_ip[ICMPerror].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def create_stream_frag(self, src_if, dst, sport, dport, data,
+ proto=IP_PROTOS.tcp, echo_reply=False):
+ """
+ Create fragmented packet stream
+
+ :param src_if: Source interface
+ :param dst: Destination IPv4 address
+ :param sport: Source port
+ :param dport: Destination port
+ :param data: Payload data
+ :param proto: protocol (TCP, UDP, ICMP)
+ :param echo_reply: use echo_reply if protocol is ICMP
+ :returns: Fragments
+ """
+ if proto == IP_PROTOS.tcp:
+ p = (IP(src=src_if.remote_ip4, dst=dst) /
+ TCP(sport=sport, dport=dport) /
+ Raw(data))
+ p = p.__class__(scapy.compat.raw(p))
+ chksum = p[TCP].chksum
+ proto_header = TCP(sport=sport, dport=dport, chksum=chksum)
+ elif proto == IP_PROTOS.udp:
+ proto_header = UDP(sport=sport, dport=dport)
+ elif proto == IP_PROTOS.icmp:
+ if not echo_reply:
+ proto_header = ICMP(id=sport, type='echo-request')
+ else:
+ proto_header = ICMP(id=sport, type='echo-reply')
+ else:
+ raise Exception("Unsupported protocol")
+ id = random.randint(0, 65535)
+ pkts = []
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[0:4])
+ else:
+ raw = Raw(data[0:16])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=0, id=id) /
+ proto_header /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[4:20])
+ else:
+ raw = Raw(data[16:32])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=3, id=id,
+ proto=proto) /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[20:])
+ else:
+ raw = Raw(data[32:])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, frag=5, proto=proto,
+ id=id) /
+ raw)
+ pkts.append(p)
+ return pkts
+
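+    # Fragment layout produced by create_stream_frag above (illustration
+    # only, not executed): the IP fragment offset field counts 8-byte units,
+    # and the first fragment carries the L4 header (20 bytes TCP, 8 bytes
+    # UDP/ICMP) plus just enough payload to fill 24 bytes, so:
+    #
+    #   frag=0, MF set   -> bytes  0..23 of the original L4 datagram
+    #   frag=3, MF set   -> bytes 24..39 (3 * 8 == 24)
+    #   frag=5, MF clear -> bytes 40..   (5 * 8 == 40)
+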
+ def reass_frags_and_verify(self, frags, src, dst):
+ """
+ Reassemble and verify fragmented packet
+
+ :param frags: Captured fragments
+ :param src: Source IPv4 address to verify
+ :param dst: Destination IPv4 address to verify
+
+ :returns: Reassembled IPv4 packet
+ """
+ buffer = BytesIO()
+ for p in frags:
+ self.assertEqual(p[IP].src, src)
+ self.assertEqual(p[IP].dst, dst)
+ self.assert_ip_checksum_valid(p)
+ buffer.seek(p[IP].frag * 8)
+ buffer.write(bytes(p[IP].payload))
+ ip = IP(src=frags[0][IP].src, dst=frags[0][IP].dst,
+ proto=frags[0][IP].proto)
+ if ip.proto == IP_PROTOS.tcp:
+ p = (ip / TCP(buffer.getvalue()))
+ self.logger.debug(ppp("Reassembled:", p))
+ self.assert_tcp_checksum_valid(p)
+ elif ip.proto == IP_PROTOS.udp:
+ p = (ip / UDP(buffer.getvalue()[:8]) /
+ Raw(buffer.getvalue()[8:]))
+ elif ip.proto == IP_PROTOS.icmp:
+ p = (ip / ICMP(buffer.getvalue()))
+ return p
+
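+    # Reassembly approach used by reass_frags_and_verify above (descriptive
+    # sketch, assuming only the three-fragment streams built by
+    # create_stream_frag): each fragment payload is written into a BytesIO
+    # at byte offset frag * 8, then the buffer is re-parsed as one L4 PDU:
+    #
+    #   buf = BytesIO()
+    #   buf.seek(pkt[IP].frag * 8)     # offset is in 8-byte units
+    #   buf.write(bytes(pkt[IP].payload))
+    #   segment = TCP(buf.getvalue())  # or UDP / ICMP for other protocols
+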
+ def verify_ipfix_nat44_ses(self, data):
+ """
+ Verify IPFIX NAT44EI session create/delete event
+
+ :param data: Decoded IPFIX data records
+ """
+ nat44_ses_create_num = 0
+ nat44_ses_delete_num = 0
+ self.assertEqual(6, len(data))
+ for record in data:
+ # natEvent
+ self.assertIn(scapy.compat.orb(record[230]), [4, 5])
+ if scapy.compat.orb(record[230]) == 4:
+ nat44_ses_create_num += 1
+ else:
+ nat44_ses_delete_num += 1
+ # sourceIPv4Address
+ self.assertEqual(self.pg0.remote_ip4,
+ str(ipaddress.IPv4Address(record[8])))
+ # postNATSourceIPv4Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET, self.nat_addr),
+ record[225])
+ # ingressVRFID
+ self.assertEqual(struct.pack("!I", 0), record[234])
+ # protocolIdentifier/sourceTransportPort
+ # /postNAPTSourceTransportPort
+ if IP_PROTOS.icmp == scapy.compat.orb(record[4]):
+ self.assertEqual(struct.pack("!H", self.icmp_id_in), record[7])
+ self.assertEqual(struct.pack("!H", self.icmp_id_out),
+ record[227])
+ elif IP_PROTOS.tcp == scapy.compat.orb(record[4]):
+ self.assertEqual(struct.pack("!H", self.tcp_port_in),
+ record[7])
+ self.assertEqual(struct.pack("!H", self.tcp_port_out),
+ record[227])
+ elif IP_PROTOS.udp == scapy.compat.orb(record[4]):
+ self.assertEqual(struct.pack("!H", self.udp_port_in),
+ record[7])
+ self.assertEqual(struct.pack("!H", self.udp_port_out),
+ record[227])
+ else:
+ self.fail("Invalid protocol")
+ self.assertEqual(3, nat44_ses_create_num)
+ self.assertEqual(3, nat44_ses_delete_num)
+
+ def verify_ipfix_addr_exhausted(self, data):
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(scapy.compat.orb(record[230]), 3)
+ # natPoolID
+ self.assertEqual(struct.pack("!I", 0), record[283])
+
+ def verify_ipfix_max_sessions(self, data, limit):
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(scapy.compat.orb(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("!I", 1), record[466])
+ # maxSessionEntries
+ self.assertEqual(struct.pack("!I", limit), record[471])
+
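+    # natEvent values checked by the verify_ipfix_* helpers above, as used
+    # in these tests (believed to match the IANA IPFIX natEvent registry):
+    # 3 = addresses exhausted, 4 = session create, 5 = session delete,
+    # 13 = quota exceeded (natQuotaExceededEvent 1 = max session entries).
+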
+ def verify_no_nat44_user(self):
+ """ Verify that there is no NAT44EI user """
+ users = self.vapi.nat44_ei_user_dump()
+ self.assertEqual(len(users), 0)
+ users = self.statistics['/nat44-ei/total-users']
+ self.assertEqual(users[0][0], 0)
+ sessions = self.statistics['/nat44-ei/total-sessions']
+ self.assertEqual(sessions[0][0], 0)
+
+ def verify_syslog_apmap(self, data, is_add=True):
+ message = data.decode('utf-8')
+ try:
+ message = SyslogMessage.parse(message)
+ except ParseError as e:
+ self.logger.error(e)
+ raise
+ else:
+ self.assertEqual(message.severity, SyslogSeverity.info)
+ self.assertEqual(message.appname, 'NAT')
+ self.assertEqual(message.msgid, 'APMADD' if is_add else 'APMDEL')
+ sd_params = message.sd.get('napmap')
+ self.assertTrue(sd_params is not None)
+ self.assertEqual(sd_params.get('IATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip4)
+ self.assertEqual(sd_params.get('ISPORT'), "%d" % self.tcp_port_in)
+ self.assertEqual(sd_params.get('XATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('XSADDR'), self.nat_addr)
+ self.assertEqual(sd_params.get('XSPORT'), "%d" % self.tcp_port_out)
+ self.assertEqual(sd_params.get('PROTO'), "%d" % IP_PROTOS.tcp)
+ self.assertTrue(sd_params.get('SSUBIX') is not None)
+ self.assertEqual(sd_params.get('SVLAN'), '0')
+
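+    # Structured-data fields of the 'napmap' element checked above (derived
+    # from the assertions, not from an external spec): ISADDR/ISPORT/IATYP
+    # describe the internal (pre-NAT) source, XSADDR/XSPORT/XATYP the
+    # external (post-NAT) source, PROTO the L4 protocol, SVLAN the VLAN tag;
+    # SSUBIX is only checked for presence.
+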
+ def verify_mss_value(self, pkt, mss):
+ if not pkt.haslayer(IP) or not pkt.haslayer(TCP):
+ raise TypeError("Not a TCP/IP packet")
+
+ for option in pkt[TCP].options:
+ if option[0] == 'MSS':
+ self.assertEqual(option[1], mss)
+ self.assert_tcp_checksum_valid(pkt)
+
+ @staticmethod
+ def proto2layer(proto):
+ if proto == IP_PROTOS.tcp:
+ return TCP
+ elif proto == IP_PROTOS.udp:
+ return UDP
+ elif proto == IP_PROTOS.icmp:
+ return ICMP
+ else:
+ raise Exception("Unsupported protocol")
+
+ def frag_in_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
+ ignore_port=False):
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ self.port_in = random.randint(1025, 65535)
+
+ # in2out
+ pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
+ self.port_in, 20, data, proto)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ if not dont_translate:
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ else:
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ self.pg1.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ if not dont_translate:
+ self.assertEqual(p[layer].dport, 20)
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, self.port_in)
+ else:
+ self.assertEqual(p[layer].sport, self.port_in)
+ else:
+ if not ignore_port:
+ if not dont_translate:
+ self.assertNotEqual(p[layer].id, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ if not dont_translate:
+ dst_addr = self.nat_addr
+ else:
+ dst_addr = self.pg0.remote_ip4
+ if proto != IP_PROTOS.icmp:
+ sport = 20
+ dport = p[layer].sport
+ else:
+ sport = p[layer].id
+ dport = 0
+ pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport, data,
+ proto, echo_reply=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg1.remote_ip4,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, 20)
+ self.assertEqual(p[layer].dport, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def reass_hairpinning(self, server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.tcp,
+ ignore_port=False):
+
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+
+ # send packet from host to server
+ pkts = self.create_stream_frag(self.pg0,
+ self.nat_addr,
+ host_in_port,
+ server_out_port,
+ data,
+ proto)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ server_addr)
+ if proto != IP_PROTOS.icmp:
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, host_in_port)
+ self.assertEqual(p[layer].dport, server_in_port)
+ else:
+ if not ignore_port:
+ self.assertNotEqual(p[layer].id, host_in_port)
+ self.assertEqual(data, p[Raw].load)
+
+ def frag_out_of_order(self, proto=IP_PROTOS.tcp, dont_translate=False,
+ ignore_port=False):
+ layer = self.proto2layer(proto)
+
+ if proto == IP_PROTOS.tcp:
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ else:
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ self.port_in = random.randint(1025, 65535)
+
+ for i in range(2):
+ # in2out
+ pkts = self.create_stream_frag(self.pg0, self.pg1.remote_ip4,
+ self.port_in, 20, data, proto)
+ pkts.reverse()
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ if not dont_translate:
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ else:
+ p = self.reass_frags_and_verify(frags,
+ self.pg0.remote_ip4,
+ self.pg1.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ if not dont_translate:
+ self.assertEqual(p[layer].dport, 20)
+ if not ignore_port:
+ self.assertNotEqual(p[layer].sport, self.port_in)
+ else:
+ self.assertEqual(p[layer].sport, self.port_in)
+ else:
+ if not ignore_port:
+ if not dont_translate:
+ self.assertNotEqual(p[layer].id, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ if not dont_translate:
+ dst_addr = self.nat_addr
+ else:
+ dst_addr = self.pg0.remote_ip4
+ if proto != IP_PROTOS.icmp:
+ sport = 20
+ dport = p[layer].sport
+ else:
+ sport = p[layer].id
+ dport = 0
+ pkts = self.create_stream_frag(self.pg1, dst_addr, sport, dport,
+ data, proto, echo_reply=True)
+ pkts.reverse()
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg1.remote_ip4,
+ self.pg0.remote_ip4)
+ if proto != IP_PROTOS.icmp:
+ self.assertEqual(p[layer].sport, 20)
+ self.assertEqual(p[layer].dport, self.port_in)
+ else:
+ self.assertEqual(p[layer].id, self.port_in)
+ self.assertEqual(data, p[Raw].load)
+
+
+def get_nat44_ei_in2out_worker_index(ip, vpp_worker_count):
+ if 0 == vpp_worker_count:
+ return 0
+ numeric = socket.inet_aton(ip)
+ numeric = struct.unpack("!L", numeric)[0]
+ numeric = socket.htonl(numeric)
+ h = numeric + (numeric >> 8) + (numeric >> 16) + (numeric >> 24)
+ return 1 + h % vpp_worker_count
+
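+# Explanatory note for the helper above (no test logic): it mirrors how the
+# nat44-ei plugin is expected to spread in2out flows over worker threads.
+# The client address is byte-swapped with htonl() into the plugin's
+# in-memory order, the four shifted copies are summed and the result is
+# mapped onto workers 1..vpp_worker_count; thread 0 is the main thread,
+# hence the "1 +". With no workers configured everything stays on thread 0.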
+
+class TestNAT44EI(MethodHolder):
+ """ NAT44EI Test Cases """
+
+ max_translations = 10240
+ max_users = 10240
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNAT44EI, cls).setUpClass()
+ cls.vapi.cli("set log class nat44-ei level debug")
+
+ cls.tcp_port_in = 6303
+ cls.tcp_port_out = 6303
+ cls.udp_port_in = 6304
+ cls.udp_port_out = 6304
+ cls.icmp_id_in = 6305
+ cls.icmp_id_out = 6305
+ cls.nat_addr = '10.0.0.3'
+ cls.ipfix_src_port = 4739
+ cls.ipfix_domain_id = 1
+ cls.tcp_external_port = 80
+ cls.udp_external_port = 69
+
+ cls.create_pg_interfaces(range(10))
+ cls.interfaces = list(cls.pg_interfaces[0:4])
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.pg0.generate_remote_hosts(3)
+ cls.pg0.configure_ipv4_neighbors()
+
+ cls.pg1.generate_remote_hosts(1)
+ cls.pg1.configure_ipv4_neighbors()
+
+        cls.overlapping_interfaces = list(cls.pg_interfaces[4:7])
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 10})
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 20})
+
+ cls.pg4._local_ip4 = "172.16.255.1"
+ cls.pg4._remote_hosts[0]._ip4 = "172.16.255.2"
+ cls.pg4.set_table_ip4(10)
+ cls.pg5._local_ip4 = "172.17.255.3"
+ cls.pg5._remote_hosts[0]._ip4 = "172.17.255.4"
+ cls.pg5.set_table_ip4(10)
+ cls.pg6._local_ip4 = "172.16.255.1"
+ cls.pg6._remote_hosts[0]._ip4 = "172.16.255.2"
+ cls.pg6.set_table_ip4(20)
+ for i in cls.overlapping_interfaces:
+ i.config_ip4()
+ i.admin_up()
+ i.resolve_arp()
+
+ cls.pg7.admin_up()
+ cls.pg8.admin_up()
+
+ cls.pg9.generate_remote_hosts(2)
+ cls.pg9.config_ip4()
+ cls.vapi.sw_interface_add_del_address(
+ sw_if_index=cls.pg9.sw_if_index,
+ prefix="10.0.0.1/24")
+
+ cls.pg9.admin_up()
+ cls.pg9.resolve_arp()
+ cls.pg9._remote_hosts[1]._ip4 = cls.pg9._remote_hosts[0]._ip4
+ cls.pg4._remote_ip4 = cls.pg9._remote_hosts[0]._ip4 = "10.0.0.2"
+ cls.pg9.resolve_arp()
+
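+    # Interface roles configured above (summary only): pg0-pg3 are the plain
+    # inside/outside test interfaces, pg4-pg6 carry overlapping address
+    # space in VRFs 10 and 20, pg7 and pg8 start without an IPv4 address for
+    # the "addresses from interface" tests, and pg9 gets two remote hosts
+    # sharing one address plus an extra 10.0.0.1/24 prefix.
+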
+ def plugin_enable(self):
+ self.vapi.nat44_ei_plugin_enable_disable(
+ sessions=self.max_translations,
+ users=self.max_users, enable=1)
+
+ def setUp(self):
+ super(TestNAT44EI, self).setUp()
+ self.plugin_enable()
+
+ def tearDown(self):
+ super(TestNAT44EI, self).tearDown()
+ if not self.vpp_dead:
+ self.vapi.nat44_ei_ipfix_enable_disable(
+ domain_id=self.ipfix_domain_id, src_port=self.ipfix_src_port,
+ enable=0)
+ self.ipfix_src_port = 4739
+ self.ipfix_domain_id = 1
+
+ self.vapi.nat44_ei_plugin_enable_disable(enable=0)
+ self.vapi.cli("clear logging")
+
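+    # Convention for counter checks in the tests below (descriptive note,
+    # based on how the statistics segment is read here): a lookup like
+    # self.statistics['/nat44-ei/...'] yields a per-thread x per-index
+    # matrix, so cnt[:, sw_if_index].sum() totals one interface across all
+    # workers and cnt[:, 0].sum() totals a plain gauge such as
+    # total-sessions.
+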
+ def test_clear_sessions(self):
+ """ NAT44EI session clearing test """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ sessions = self.statistics['/nat44-ei/total-sessions']
+ self.assertGreater(sessions[:, 0].sum(), 0, "Session count invalid")
+ self.logger.info("sessions before clearing: %s" % sessions[0][0])
+
+ self.vapi.cli("clear nat44 ei sessions")
+
+ sessions = self.statistics['/nat44-ei/total-sessions']
+ self.assertEqual(sessions[:, 0].sum(), 0, "Session count invalid")
+ self.logger.info("sessions after clearing: %s" % sessions[0][0])
+
+ def test_dynamic(self):
+ """ NAT44EI dynamic translation test """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # in2out
+ tcpn = self.statistics['/nat44-ei/in2out/slowpath/tcp']
+ udpn = self.statistics['/nat44-ei/in2out/slowpath/udp']
+ icmpn = self.statistics['/nat44-ei/in2out/slowpath/icmp']
+ drops = self.statistics['/nat44-ei/in2out/slowpath/drops']
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ if_idx = self.pg0.sw_if_index
+ cnt = self.statistics['/nat44-ei/in2out/slowpath/tcp']
+ self.assertEqual(cnt[:, if_idx].sum() - tcpn[:, if_idx].sum(), 2)
+ cnt = self.statistics['/nat44-ei/in2out/slowpath/udp']
+ self.assertEqual(cnt[:, if_idx].sum() - udpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ei/in2out/slowpath/icmp']
+ self.assertEqual(cnt[:, if_idx].sum() - icmpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ei/in2out/slowpath/drops']
+ self.assertEqual(cnt[:, if_idx].sum() - drops[:, if_idx].sum(), 0)
+
+ # out2in
+ tcpn = self.statistics['/nat44-ei/out2in/slowpath/tcp']
+ udpn = self.statistics['/nat44-ei/out2in/slowpath/udp']
+ icmpn = self.statistics['/nat44-ei/out2in/slowpath/icmp']
+ drops = self.statistics['/nat44-ei/out2in/slowpath/drops']
+
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ if_idx = self.pg1.sw_if_index
+ cnt = self.statistics['/nat44-ei/out2in/slowpath/tcp']
+ self.assertEqual(cnt[:, if_idx].sum() - tcpn[:, if_idx].sum(), 2)
+ cnt = self.statistics['/nat44-ei/out2in/slowpath/udp']
+ self.assertEqual(cnt[:, if_idx].sum() - udpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ei/out2in/slowpath/icmp']
+ self.assertEqual(cnt[:, if_idx].sum() - icmpn[:, if_idx].sum(), 1)
+ cnt = self.statistics['/nat44-ei/out2in/slowpath/drops']
+ self.assertEqual(cnt[:, if_idx].sum() - drops[:, if_idx].sum(), 0)
+
+ users = self.statistics['/nat44-ei/total-users']
+ self.assertEqual(users[:, 0].sum(), 1)
+ sessions = self.statistics['/nat44-ei/total-sessions']
+ self.assertEqual(sessions[:, 0].sum(), 3)
+
+ def test_dynamic_icmp_errors_in2out_ttl_1(self):
+ """ NAT44EI handling of client packets with TTL=1 """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # Client side - generate traffic
+ pkts = self.create_stream_in(self.pg0, self.pg1, ttl=1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Client side - verify ICMP type 11 packets
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in_with_icmp_errors(capture, self.pg0)
+
+ def test_dynamic_icmp_errors_out2in_ttl_1(self):
+ """ NAT44EI handling of server packets with TTL=1 """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # Client side - create sessions
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Server side - generate traffic
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+ pkts = self.create_stream_out(self.pg1, ttl=1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Server side - verify ICMP type 11 packets
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out_with_icmp_errors(capture,
+ src_ip=self.pg1.local_ip4)
+
+ def test_dynamic_icmp_errors_in2out_ttl_2(self):
+ """ NAT44EI handling of error responses to client packets with TTL=2
+ """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # Client side - generate traffic
+ pkts = self.create_stream_in(self.pg0, self.pg1, ttl=2)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Server side - simulate ICMP type 11 response
+ capture = self.pg1.get_capture(len(pkts))
+ pkts = [Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ ICMP(type=11) / packet[IP] for packet in capture]
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Client side - verify ICMP type 11 packets
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in_with_icmp_errors(capture, self.pg0)
+
+ def test_dynamic_icmp_errors_out2in_ttl_2(self):
+ """ NAT44EI handling of error responses to server packets with TTL=2
+ """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # Client side - create sessions
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Server side - generate traffic
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+ pkts = self.create_stream_out(self.pg1, ttl=2)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Client side - simulate ICMP type 11 response
+ capture = self.pg0.get_capture(len(pkts))
+ pkts = [Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ ICMP(type=11) / packet[IP] for packet in capture]
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Server side - verify ICMP type 11 packets
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out_with_icmp_errors(capture)
+
+ def test_ping_out_interface_from_outside(self):
+ """ NAT44EI ping out interface from outside network """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg1.local_ip4) /
+ ICMP(id=self.icmp_id_out, type='echo-request'))
+ pkts = [p]
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ packet = capture[0]
+ try:
+ self.assertEqual(packet[IP].src, self.pg1.local_ip4)
+ self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ self.assertEqual(packet[ICMP].type, 0) # echo reply
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def test_ping_internal_host_from_outside(self):
+ """ NAT44EI ping internal host from outside network """
+
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # out2in
+ pkt = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr, ttl=64) /
+ ICMP(id=self.icmp_id_out, type='echo-request'))
+ self.pg1.add_stream(pkt)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ self.verify_capture_in(capture, self.pg0)
+ self.assert_equal(capture[0][IP].proto, IP_PROTOS.icmp)
+
+ # in2out
+ pkt = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4, ttl=64) /
+ ICMP(id=self.icmp_id_in, type='echo-reply'))
+ self.pg0.add_stream(pkt)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ self.verify_capture_out(capture, same_port=True)
+ self.assert_equal(capture[0][IP].proto, IP_PROTOS.icmp)
+
+ def test_forwarding(self):
+ """ NAT44EI forwarding test """
+
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_forwarding_enable_disable(enable=1)
+
+ real_ip = self.pg0.remote_ip4
+ alias_ip = self.nat_addr
+ flags = self.config_flags.NAT44_EI_ADDR_ONLY_MAPPING
+ self.vapi.nat44_ei_add_del_static_mapping(
+ is_add=1, local_ip_address=real_ip,
+ external_ip_address=alias_ip,
+ external_sw_if_index=0xFFFFFFFF,
+ flags=flags)
+
+ try:
+ # static mapping match
+
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, same_port=True)
+
+ # no static mapping match
+
+ host0 = self.pg0.remote_hosts[0]
+ self.pg0.remote_hosts[0] = self.pg0.remote_hosts[1]
+ try:
+ pkts = self.create_stream_out(self.pg1,
+ dst_ip=self.pg0.remote_ip4,
+ use_inside_ports=True)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4,
+ same_port=True)
+ finally:
+ self.pg0.remote_hosts[0] = host0
+
+ finally:
+ self.vapi.nat44_ei_forwarding_enable_disable(enable=0)
+ flags = self.config_flags.NAT44_EI_ADDR_ONLY_MAPPING
+ self.vapi.nat44_ei_add_del_static_mapping(
+ is_add=0,
+ local_ip_address=real_ip,
+ external_ip_address=alias_ip,
+ external_sw_if_index=0xFFFFFFFF,
+ flags=flags)
+
+ def test_static_in(self):
+ """ NAT44EI 1:1 NAT initialized from inside network """
+
+ nat_ip = "10.0.0.10"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ sm = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(len(sm), 1)
+ self.assertEqual(sm[0].tag, '')
+ self.assertEqual(sm[0].protocol, 0)
+ self.assertEqual(sm[0].local_port, 0)
+ self.assertEqual(sm[0].external_port, 0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip, True)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, nat_ip)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_out(self):
+ """ NAT44EI 1:1 NAT initialized from outside network """
+
+ nat_ip = "10.0.0.20"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+ tag = "testTAG"
+
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip, tag=tag)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ sm = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(len(sm), 1)
+ self.assertEqual(sm[0].tag, tag)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, nat_ip)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip, True)
+
+ def test_static_with_port_in(self):
+ """ NAT44EI 1:1 NAPT initialized from inside network """
+
+ self.tcp_port_out = 3606
+ self.udp_port_out = 3607
+ self.icmp_id_out = 3608
+
+ self.nat44_add_address(self.nat_addr)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.tcp_port_in, self.tcp_port_out,
+ proto=IP_PROTOS.tcp)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.udp_port_in, self.udp_port_out,
+ proto=IP_PROTOS.udp)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.icmp_id_in, self.icmp_id_out,
+ proto=IP_PROTOS.icmp)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_with_port_out(self):
+ """ NAT44EI 1:1 NAPT initialized from outside network """
+
+ self.tcp_port_out = 30606
+ self.udp_port_out = 30607
+ self.icmp_id_out = 30608
+
+ self.nat44_add_address(self.nat_addr)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.tcp_port_in, self.tcp_port_out,
+ proto=IP_PROTOS.tcp)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.udp_port_in, self.udp_port_out,
+ proto=IP_PROTOS.udp)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, self.nat_addr,
+ self.icmp_id_in, self.icmp_id_out,
+ proto=IP_PROTOS.icmp)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ def test_static_vrf_aware(self):
+ """ NAT44EI 1:1 NAT VRF awareness """
+
+ nat_ip1 = "10.0.0.30"
+ nat_ip2 = "10.0.0.40"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.nat44_add_static_mapping(self.pg4.remote_ip4, nat_ip1,
+ vrf_id=10)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip2,
+ vrf_id=10)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg3.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg4.sw_if_index,
+ flags=flags, is_add=1)
+
+ # inside interface VRF match NAT44EI static mapping VRF
+ pkts = self.create_stream_in(self.pg4, self.pg3)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip1, True)
+
+        # inside interface VRF doesn't match NAT44EI static mapping VRF
+        # (packets are dropped)
+ pkts = self.create_stream_in(self.pg0, self.pg3)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg3.assert_nothing_captured()
+
+ def test_dynamic_to_static(self):
+ """ NAT44EI Switch from dynamic translation to 1:1NAT """
+ nat_ip = "10.0.0.10"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # dynamic
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # 1:1NAT
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
+ sessions = self.vapi.nat44_ei_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions), 0)
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip, True)
+
+ def test_identity_nat(self):
+ """ NAT44EI Identity NAT """
+ flags = self.config_flags.NAT44_EI_ADDR_ONLY_MAPPING
+ self.vapi.nat44_ei_add_del_identity_mapping(
+ ip_address=self.pg0.remote_ip4, sw_if_index=0xFFFFFFFF,
+ flags=flags, is_add=1)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ p = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4) /
+ TCP(sport=12345, dport=56789))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(ip.src, self.pg1.remote_ip4)
+ self.assertEqual(tcp.dport, 56789)
+ self.assertEqual(tcp.sport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ sessions = self.vapi.nat44_ei_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(len(sessions), 0)
+ flags = self.config_flags.NAT44_EI_ADDR_ONLY_MAPPING
+ self.vapi.nat44_ei_add_del_identity_mapping(
+ ip_address=self.pg0.remote_ip4, sw_if_index=0xFFFFFFFF,
+ flags=flags, vrf_id=1, is_add=1)
+ identity_mappings = self.vapi.nat44_ei_identity_mapping_dump()
+ self.assertEqual(len(identity_mappings), 2)
+
+ def test_multiple_inside_interfaces(self):
+ """ NAT44EI multiple non-overlapping address space inside interfaces
+ """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg3.sw_if_index,
+ is_add=1)
+
+ # between two NAT44EI inside interfaces (no translation)
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_no_translation(capture, self.pg0, self.pg1)
+
+        # from inside interface to interface without NAT (no translation)
+ pkts = self.create_stream_in(self.pg0, self.pg2)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_no_translation(capture, self.pg0, self.pg2)
+
+ # in2out 1st interface
+ pkts = self.create_stream_in(self.pg0, self.pg3)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in 1st interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out 2nd interface
+ pkts = self.create_stream_in(self.pg1, self.pg3)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in 2nd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg1)
+
+ def test_inside_overlapping_interfaces(self):
+ """ NAT44EI multiple inside interfaces with overlapping address space
+ """
+
+ static_nat_ip = "10.0.0.10"
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg3.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg4.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg5.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg6.sw_if_index,
+ flags=flags, is_add=1)
+ self.nat44_add_static_mapping(self.pg6.remote_ip4, static_nat_ip,
+ vrf_id=20)
+
+ # between NAT44EI inside interfaces with same VRF (no translation)
+ pkts = self.create_stream_in(self.pg4, self.pg5)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(len(pkts))
+ self.verify_capture_no_translation(capture, self.pg4, self.pg5)
+
+ # between NAT44EI inside interfaces with different VRF (hairpinning)
+ p = (Ether(src=self.pg4.remote_mac, dst=self.pg4.local_mac) /
+ IP(src=self.pg4.remote_ip4, dst=static_nat_ip) /
+ TCP(sport=1234, dport=5678))
+ self.pg4.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, self.pg6.remote_ip4)
+ self.assertNotEqual(tcp.sport, 1234)
+ self.assertEqual(tcp.dport, 5678)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # in2out 1st interface
+ pkts = self.create_stream_in(self.pg4, self.pg3)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in 1st interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg4.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg4)
+
+ # in2out 2nd interface
+ pkts = self.create_stream_in(self.pg5, self.pg3)
+ self.pg5.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in 2nd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg5)
+
+ # pg5 session dump
+ addresses = self.vapi.nat44_ei_address_dump()
+ self.assertEqual(len(addresses), 1)
+ sessions = self.vapi.nat44_ei_user_session_dump(
+ self.pg5.remote_ip4, 10)
+ self.assertEqual(len(sessions), 3)
+ for session in sessions:
+ self.assertFalse(session.flags &
+ self.config_flags.NAT44_EI_STATIC_MAPPING)
+ self.assertEqual(str(session.inside_ip_address),
+ self.pg5.remote_ip4)
+ self.assertEqual(session.outside_ip_address,
+ addresses[0].ip_address)
+ self.assertEqual(sessions[0].protocol, IP_PROTOS.tcp)
+ self.assertEqual(sessions[1].protocol, IP_PROTOS.udp)
+ self.assertEqual(sessions[2].protocol, IP_PROTOS.icmp)
+ self.assertEqual(sessions[0].inside_port, self.tcp_port_in)
+ self.assertEqual(sessions[1].inside_port, self.udp_port_in)
+ self.assertEqual(sessions[2].inside_port, self.icmp_id_in)
+ self.assertEqual(sessions[0].outside_port, self.tcp_port_out)
+ self.assertEqual(sessions[1].outside_port, self.udp_port_out)
+ self.assertEqual(sessions[2].outside_port, self.icmp_id_out)
+
+ # in2out 3rd interface
+ pkts = self.create_stream_in(self.pg6, self.pg3)
+ self.pg6.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture, static_nat_ip, True)
+
+ # out2in 3rd interface
+ pkts = self.create_stream_out(self.pg3, static_nat_ip)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg6)
+
+ # general user and session dump verifications
+ users = self.vapi.nat44_ei_user_dump()
+ self.assertGreaterEqual(len(users), 3)
+ addresses = self.vapi.nat44_ei_address_dump()
+ self.assertEqual(len(addresses), 1)
+ for user in users:
+ sessions = self.vapi.nat44_ei_user_session_dump(user.ip_address,
+ user.vrf_id)
+ for session in sessions:
+ self.assertEqual(user.ip_address, session.inside_ip_address)
+ self.assertTrue(session.total_bytes > session.total_pkts > 0)
+ self.assertTrue(session.protocol in
+ [IP_PROTOS.tcp, IP_PROTOS.udp,
+ IP_PROTOS.icmp])
+
+ # pg4 session dump
+ sessions = self.vapi.nat44_ei_user_session_dump(
+ self.pg4.remote_ip4, 10)
+ self.assertGreaterEqual(len(sessions), 4)
+ for session in sessions:
+ self.assertFalse(
+ session.flags & self.config_flags.NAT44_EI_STATIC_MAPPING)
+ self.assertEqual(str(session.inside_ip_address),
+ self.pg4.remote_ip4)
+ self.assertEqual(session.outside_ip_address,
+ addresses[0].ip_address)
+
+ # pg6 session dump
+ sessions = self.vapi.nat44_ei_user_session_dump(
+ self.pg6.remote_ip4, 20)
+ self.assertGreaterEqual(len(sessions), 3)
+ for session in sessions:
+ self.assertTrue(
+ session.flags & self.config_flags.NAT44_EI_STATIC_MAPPING)
+ self.assertEqual(str(session.inside_ip_address),
+ self.pg6.remote_ip4)
+ self.assertEqual(str(session.outside_ip_address),
+ static_nat_ip)
+ self.assertTrue(session.inside_port in
+ [self.tcp_port_in, self.udp_port_in,
+ self.icmp_id_in])
+
+ def test_hairpinning(self):
+ """ NAT44EI hairpinning - 1:1 NAPT """
+
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ host_out_port = 0
+ server_in_port = 5678
+ server_out_port = 8765
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # add static mapping for server
+ self.nat44_add_static_mapping(server.ip4, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.tcp)
+
+ cnt = self.statistics['/nat44-ei/hairpinning']
+ # send packet from host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.nat_addr) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, server.ip4)
+ self.assertNotEqual(tcp.sport, host_in_port)
+ self.assertEqual(tcp.dport, server_in_port)
+ self.assert_packet_checksums_valid(p)
+ host_out_port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[:, if_idx].sum() - cnt[:, if_idx].sum(), 1)
+
+ # send reply from server to host
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.nat_addr) /
+ TCP(sport=server_in_port, dport=host_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, host.ip4)
+ self.assertEqual(tcp.sport, server_out_port)
+ self.assertEqual(tcp.dport, host_in_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[:, if_idx].sum() - cnt[:, if_idx].sum(),
+ 2+(1 if self.vpp_worker_count > 0 else 0))
+
+ def test_hairpinning2(self):
+ """ NAT44EI hairpinning - 1:1 NAT"""
+
+ server1_nat_ip = "10.0.0.10"
+ server2_nat_ip = "10.0.0.11"
+ host = self.pg0.remote_hosts[0]
+ server1 = self.pg0.remote_hosts[1]
+ server2 = self.pg0.remote_hosts[2]
+ server_tcp_port = 22
+ server_udp_port = 20
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # add static mapping for servers
+ self.nat44_add_static_mapping(server1.ip4, server1_nat_ip)
+ self.nat44_add_static_mapping(server2.ip4, server2_nat_ip)
+
+ # host to server1
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ TCP(sport=self.tcp_port_in, dport=server_tcp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ UDP(sport=self.udp_port_in, dport=server_udp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, server1.ip4)
+ if packet.haslayer(TCP):
+ self.assertNotEqual(packet[TCP].sport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].dport, server_tcp_port)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertNotEqual(packet[UDP].sport, self.udp_port_in)
+ self.assertEqual(packet[UDP].dport, server_udp_port)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ self.assertNotEqual(packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server1 to host
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ TCP(sport=server_tcp_port, dport=self.tcp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ UDP(sport=server_udp_port, dport=self.udp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ ICMP(id=self.icmp_id_out, type='echo-reply'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server1_nat_ip)
+ self.assertEqual(packet[IP].dst, host.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].sport, server_tcp_port)
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ self.assertEqual(packet[UDP].sport, server_udp_port)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server2 to server1
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ TCP(sport=self.tcp_port_in, dport=server_tcp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ UDP(sport=self.udp_port_in, dport=server_udp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server2_nat_ip)
+ self.assertEqual(packet[IP].dst, server1.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].sport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].dport, server_tcp_port)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].sport, self.udp_port_in)
+ self.assertEqual(packet[UDP].dport, server_udp_port)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server1 to server2
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ TCP(sport=server_tcp_port, dport=self.tcp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ UDP(sport=server_udp_port, dport=self.udp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ ICMP(id=self.icmp_id_out, type='echo-reply'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server1_nat_ip)
+ self.assertEqual(packet[IP].dst, server2.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].sport, server_tcp_port)
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ self.assertEqual(packet[UDP].sport, server_udp_port)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_hairpinning_avoid_inf_loop(self):
+ """ NAT44EI hairpinning - 1:1 NAPT avoid infinite loop """
+
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ host_out_port = 0
+ server_in_port = 5678
+ server_out_port = 8765
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # add static mapping for server
+ self.nat44_add_static_mapping(server.ip4, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.tcp)
+
+ # add another static mapping that maps pg0.local_ip4 address to itself
+ self.nat44_add_static_mapping(self.pg0.local_ip4, self.pg0.local_ip4)
+
+ # send packet from host to VPP (the packet should get dropped)
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.pg0.local_ip4) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ # Here VPP used to crash due to an infinite loop
+
+ cnt = self.statistics['/nat44-ei/hairpinning']
+ # send packet from host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.nat_addr) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, server.ip4)
+ self.assertNotEqual(tcp.sport, host_in_port)
+ self.assertEqual(tcp.dport, server_in_port)
+ self.assert_packet_checksums_valid(p)
+ host_out_port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[:, if_idx].sum() - cnt[:, if_idx].sum(), 1)
+
+ # send reply from server to host
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.nat_addr) /
+ TCP(sport=server_in_port, dport=host_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, host.ip4)
+ self.assertEqual(tcp.sport, server_out_port)
+ self.assertEqual(tcp.dport, host_in_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[:, if_idx].sum() - cnt[:, if_idx].sum(),
+ 2+(1 if self.vpp_worker_count > 0 else 0))
+
+ def test_interface_addr(self):
+ """ NAT44EI acquire addresses from interface """
+ self.vapi.nat44_ei_add_del_interface_addr(
+ is_add=1,
+ sw_if_index=self.pg7.sw_if_index)
+
+ # no address in NAT pool
+ addresses = self.vapi.nat44_ei_address_dump()
+ self.assertEqual(0, len(addresses))
+
+ # configure interface address and check NAT address pool
+ self.pg7.config_ip4()
+ addresses = self.vapi.nat44_ei_address_dump()
+ self.assertEqual(1, len(addresses))
+ self.assertEqual(str(addresses[0].ip_address), self.pg7.local_ip4)
+
+ # remove interface address and check NAT address pool
+ self.pg7.unconfig_ip4()
+ addresses = self.vapi.nat44_ei_address_dump()
+ self.assertEqual(0, len(addresses))
+
+ def test_interface_addr_static_mapping(self):
+ """ NAT44EI Static mapping with addresses from interface """
+ tag = "testTAG"
+
+ self.vapi.nat44_ei_add_del_interface_addr(
+ is_add=1,
+ sw_if_index=self.pg7.sw_if_index)
+ self.nat44_add_static_mapping(
+ '1.2.3.4',
+ external_sw_if_index=self.pg7.sw_if_index,
+ tag=tag)
+
+ # static mappings with external interface
+ static_mappings = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(1, len(static_mappings))
+ self.assertEqual(self.pg7.sw_if_index,
+ static_mappings[0].external_sw_if_index)
+ self.assertEqual(static_mappings[0].tag, tag)
+
+ # configure interface address and check static mappings
+ self.pg7.config_ip4()
+ static_mappings = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(2, len(static_mappings))
+ resolved = False
+ for sm in static_mappings:
+ if sm.external_sw_if_index == 0xFFFFFFFF:
+ self.assertEqual(str(sm.external_ip_address),
+ self.pg7.local_ip4)
+ self.assertEqual(sm.tag, tag)
+ resolved = True
+ self.assertTrue(resolved)
+
+ # remove interface address and check static mappings
+ self.pg7.unconfig_ip4()
+ static_mappings = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(1, len(static_mappings))
+ self.assertEqual(self.pg7.sw_if_index,
+ static_mappings[0].external_sw_if_index)
+ self.assertEqual(static_mappings[0].tag, tag)
+
+ # configure interface address again and check static mappings
+ self.pg7.config_ip4()
+ static_mappings = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(2, len(static_mappings))
+ resolved = False
+ for sm in static_mappings:
+ if sm.external_sw_if_index == 0xFFFFFFFF:
+ self.assertEqual(str(sm.external_ip_address),
+ self.pg7.local_ip4)
+ self.assertEqual(sm.tag, tag)
+ resolved = True
+ self.assertTrue(resolved)
+
+ # remove static mapping
+ self.nat44_add_static_mapping(
+ '1.2.3.4',
+ external_sw_if_index=self.pg7.sw_if_index,
+ tag=tag,
+ is_add=0)
+ static_mappings = self.vapi.nat44_ei_static_mapping_dump()
+ self.assertEqual(0, len(static_mappings))
+
+ def test_interface_addr_identity_nat(self):
+ """ NAT44EI Identity NAT with addresses from interface """
+
+ port = 53053
+ self.vapi.nat44_ei_add_del_interface_addr(
+ is_add=1,
+ sw_if_index=self.pg7.sw_if_index)
+ self.vapi.nat44_ei_add_del_identity_mapping(
+ ip_address=b'0',
+ sw_if_index=self.pg7.sw_if_index,
+ port=port,
+ protocol=IP_PROTOS.tcp,
+ is_add=1)
+
+ # identity mappings with external interface
+ identity_mappings = self.vapi.nat44_ei_identity_mapping_dump()
+ self.assertEqual(1, len(identity_mappings))
+ self.assertEqual(self.pg7.sw_if_index,
+ identity_mappings[0].sw_if_index)
+
+ # configure interface address and check identity mappings
+ self.pg7.config_ip4()
+ identity_mappings = self.vapi.nat44_ei_identity_mapping_dump()
+ resolved = False
+ self.assertEqual(2, len(identity_mappings))
+ for sm in identity_mappings:
+ if sm.sw_if_index == 0xFFFFFFFF:
+                self.assertEqual(str(sm.ip_address), self.pg7.local_ip4)
+                self.assertEqual(port, sm.port)
+                self.assertEqual(IP_PROTOS.tcp, sm.protocol)
+ resolved = True
+ self.assertTrue(resolved)
+
+ # remove interface address and check identity mappings
+ self.pg7.unconfig_ip4()
+ identity_mappings = self.vapi.nat44_ei_identity_mapping_dump()
+ self.assertEqual(1, len(identity_mappings))
+ self.assertEqual(self.pg7.sw_if_index,
+ identity_mappings[0].sw_if_index)
+
+ def test_ipfix_nat44_sess(self):
+ """ NAT44EI IPFIX logging NAT44EI session created/deleted """
+ self.ipfix_domain_id = 10
+ self.ipfix_src_port = 20202
+ collector_port = 30303
+ bind_layers(UDP, IPFIX, dport=30303)
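+        # the exporter is pointed at a non-default collector port in this
+        # test, so bind the IPFIX layer to UDP dport 30303 for decoding;
+        # the other IPFIX tests below use the default port 4739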
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
+ src_address=self.pg3.local_ip4,
+ path_mtu=512,
+ template_interval=10,
+ collector_port=collector_port)
+ self.vapi.nat44_ei_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=1)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+ self.nat44_add_address(self.nat_addr, is_add=0)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(7)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, collector_port)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_nat44_ses(data)
+
+ def test_ipfix_addr_exhausted(self):
+ """ NAT44EI IPFIX logging NAT addresses exhausted """
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
+ src_address=self.pg3.local_ip4,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat44_ei_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=1)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=3025))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sleep(1)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(7)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_addr_exhausted(data)
+
+ def test_ipfix_max_sessions(self):
+ """ NAT44EI IPFIX logging maximum session entries exceeded """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ max_sessions_per_thread = self.max_translations
+ max_sessions = max(1, self.vpp_worker_count) * max_sessions_per_thread
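+        # max_translations is treated here as a per-thread limit, so the
+        # number of sessions to create is that value multiplied by the
+        # worker count (or 1 when running single-threaded)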
+
+ pkts = []
+ for i in range(0, max_sessions):
+ src = "10.10.%u.%u" % ((i & 0xFF00) >> 8, i & 0xFF)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=src, dst=self.pg1.remote_ip4) /
+ TCP(sport=1025))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg1.get_capture(max_sessions)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
+ src_address=self.pg3.local_ip4,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat44_ei_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=1)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1025))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sleep(1)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(7)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_sessions(data, max_sessions_per_thread)
+
+ def test_syslog_apmap(self):
+ """ NAT44EI syslog address and port mapping creation and deletion """
+ self.vapi.syslog_set_filter(
+ self.SYSLOG_SEVERITY.SYSLOG_API_SEVERITY_INFO)
+ self.vapi.syslog_set_sender(self.pg3.local_ip4, self.pg3.remote_ip4)
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ self.tcp_port_out = capture[0][TCP].sport
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_apmap(capture[0][Raw].load)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.nat44_add_address(self.nat_addr, is_add=0)
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_apmap(capture[0][Raw].load, False)
+
+ def test_pool_addr_fib(self):
+ """ NAT44EI add pool addresses to FIB """
+ static_addr = '10.0.0.10'
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, static_addr)
+
+ # NAT44EI address
+ p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
+ ARP(op=ARP.who_has, pdst=self.nat_addr,
+ psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ self.assertTrue(capture[0].haslayer(ARP))
+        self.assertEqual(capture[0][ARP].op, ARP.is_at)
+
+ # 1:1 NAT address
+ p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
+ ARP(op=ARP.who_has, pdst=static_addr,
+ psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ self.assertTrue(capture[0].haslayer(ARP))
+        self.assertEqual(capture[0][ARP].op, ARP.is_at)
+
+ # send ARP to non-NAT44EI interface
+ p = (Ether(src=self.pg2.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
+ ARP(op=ARP.who_has, pdst=self.nat_addr,
+ psrc=self.pg2.remote_ip4, hwsrc=self.pg2.remote_mac))
+ self.pg2.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ # remove addresses and verify
+ self.nat44_add_address(self.nat_addr, is_add=0)
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, static_addr,
+ is_add=0)
+
+ p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
+ ARP(op=ARP.who_has, pdst=self.nat_addr,
+ psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ p = (Ether(src=self.pg1.remote_mac, dst='ff:ff:ff:ff:ff:ff') /
+ ARP(op=ARP.who_has, pdst=static_addr,
+ psrc=self.pg1.remote_ip4, hwsrc=self.pg1.remote_mac))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+
+ def test_vrf_mode(self):
+ """ NAT44EI tenant VRF aware address pool mode """
+
+ vrf_id1 = 1
+ vrf_id2 = 2
+ nat_ip1 = "10.0.0.10"
+ nat_ip2 = "10.0.0.11"
+
+ self.pg0.unconfig_ip4()
+ self.pg1.unconfig_ip4()
+ self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id1})
+ self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id2})
+ self.pg0.set_table_ip4(vrf_id1)
+ self.pg1.set_table_ip4(vrf_id2)
+ self.pg0.config_ip4()
+ self.pg1.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg1.resolve_arp()
+
+ self.nat44_add_address(nat_ip1, vrf_id=vrf_id1)
+ self.nat44_add_address(nat_ip2, vrf_id=vrf_id2)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg2.sw_if_index,
+ is_add=1)
+
+ try:
+ # first VRF
+ pkts = self.create_stream_in(self.pg0, self.pg2)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip1)
+
+ # second VRF
+ pkts = self.create_stream_in(self.pg1, self.pg2)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip2)
+
+ finally:
+ self.pg0.unconfig_ip4()
+ self.pg1.unconfig_ip4()
+ self.pg0.set_table_ip4(0)
+ self.pg1.set_table_ip4(0)
+ self.pg0.config_ip4()
+ self.pg1.config_ip4()
+ self.pg0.resolve_arp()
+ self.pg1.resolve_arp()
+ self.vapi.ip_table_add_del(is_add=0, table={'table_id': vrf_id1})
+ self.vapi.ip_table_add_del(is_add=0, table={'table_id': vrf_id2})
+
+ def test_vrf_feature_independent(self):
+ """ NAT44EI tenant VRF independent address pool mode """
+
+ nat_ip1 = "10.0.0.10"
+ nat_ip2 = "10.0.0.11"
+
+ self.nat44_add_address(nat_ip1)
+ self.nat44_add_address(nat_ip2, vrf_id=99)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg2.sw_if_index,
+ is_add=1)
+
+ # first VRF
+ pkts = self.create_stream_in(self.pg0, self.pg2)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip1)
+
+ # second VRF
+ pkts = self.create_stream_in(self.pg1, self.pg2)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip1)
+
+ def test_dynamic_ipless_interfaces(self):
+ """ NAT44EI interfaces without configured IP address """
+ self.create_routes_and_neigbors()
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg7.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg8.sw_if_index,
+ is_add=1)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg7, self.pg8)
+ self.pg7.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg8.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg8, self.nat_addr)
+ self.pg8.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg7.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg7)
+
+ def test_static_ipless_interfaces(self):
+ """ NAT44EI interfaces without configured IP address - 1:1 NAT """
+
+ self.create_routes_and_neigbors()
+ self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg7.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg8.sw_if_index,
+ is_add=1)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg8)
+ self.pg8.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg7.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg7)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg7, self.pg8)
+ self.pg7.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg8.get_capture(len(pkts))
+ self.verify_capture_out(capture, self.nat_addr, True)
+
+ def test_static_with_port_ipless_interfaces(self):
+ """ NAT44EI interfaces without configured IP address - 1:1 NAPT """
+
+ self.tcp_port_out = 30606
+ self.udp_port_out = 30607
+ self.icmp_id_out = 30608
+
+ self.create_routes_and_neigbors()
+ self.nat44_add_address(self.nat_addr)
+ self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
+ self.tcp_port_in, self.tcp_port_out,
+ proto=IP_PROTOS.tcp)
+ self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
+ self.udp_port_in, self.udp_port_out,
+ proto=IP_PROTOS.udp)
+ self.nat44_add_static_mapping(self.pg7.remote_ip4, self.nat_addr,
+ self.icmp_id_in, self.icmp_id_out,
+ proto=IP_PROTOS.icmp)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg7.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg8.sw_if_index,
+ is_add=1)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg8)
+ self.pg8.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg7.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg7)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg7, self.pg8)
+ self.pg7.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg8.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ def test_static_unknown_proto(self):
+ """ NAT44EI 1:1 translate packet with unknown protocol """
+ nat_ip = "10.0.0.10"
+ self.nat44_add_static_mapping(self.pg0.remote_ip4, nat_ip)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # in2out
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, nat_ip)
+ self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # out2in
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=nat_ip) /
+ GRE() /
+ IP(src=self.pg3.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, self.pg1.remote_ip4)
+ self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_hairpinning_static_unknown_proto(self):
+ """ NAT44EI 1:1 translate packet with unknown protocol - hairpinning
+ """
+
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+
+ host_nat_ip = "10.0.0.10"
+ server_nat_ip = "10.0.0.11"
+
+ self.nat44_add_static_mapping(host.ip4, host_nat_ip)
+ self.nat44_add_static_mapping(server.ip4, server_nat_ip)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # host to server
+ p = (Ether(dst=self.pg0.local_mac, src=host.mac) /
+ IP(src=host.ip4, dst=server_nat_ip) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, host_nat_ip)
+ self.assertEqual(packet[IP].dst, server.ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server to host
+ p = (Ether(dst=self.pg0.local_mac, src=server.mac) /
+ IP(src=server.ip4, dst=host_nat_ip) /
+ GRE() /
+ IP(src=self.pg3.remote_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, server_nat_ip)
+ self.assertEqual(packet[IP].dst, host.ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_output_feature(self):
+ """ NAT44EI output feature (in2out postrouting) """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1, flags=flags,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1,
+ sw_if_index=self.pg3.sw_if_index)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg3)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ # from non-NAT interface to NAT inside interface
+ pkts = self.create_stream_in(self.pg2, self.pg0)
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_no_translation(capture, self.pg2, self.pg0)
+
+ def test_output_feature_vrf_aware(self):
+ """ NAT44EI output feature VRF aware (in2out postrouting) """
+ nat_ip_vrf10 = "10.0.0.10"
+ nat_ip_vrf20 = "10.0.0.20"
+
+ r1 = VppIpRoute(self, self.pg3.remote_ip4, 32,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index)],
+ table_id=10)
+ r2 = VppIpRoute(self, self.pg3.remote_ip4, 32,
+ [VppRoutePath(self.pg3.remote_ip4,
+ self.pg3.sw_if_index)],
+ table_id=20)
+ r1.add_vpp_config()
+ r2.add_vpp_config()
+
+ self.nat44_add_address(nat_ip_vrf10, vrf_id=10)
+ self.nat44_add_address(nat_ip_vrf20, vrf_id=20)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1, flags=flags,
+ sw_if_index=self.pg4.sw_if_index)
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1, flags=flags,
+ sw_if_index=self.pg6.sw_if_index)
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1,
+ sw_if_index=self.pg3.sw_if_index)
+
+ # in2out VRF 10
+ pkts = self.create_stream_in(self.pg4, self.pg3)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=nat_ip_vrf10)
+
+ # out2in VRF 10
+ pkts = self.create_stream_out(self.pg3, dst_ip=nat_ip_vrf10)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg4.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg4)
+
+ # in2out VRF 20
+ pkts = self.create_stream_in(self.pg6, self.pg3)
+ self.pg6.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=nat_ip_vrf20)
+
+ # out2in VRF 20
+ pkts = self.create_stream_out(self.pg3, dst_ip=nat_ip_vrf20)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg6)
+
+ def test_output_feature_hairpinning(self):
+ """ NAT44EI output feature hairpinning (in2out postrouting) """
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ host_out_port = 0
+ server_in_port = 5678
+ server_out_port = 8765
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat44_ei_interface_add_del_output_feature(
+ is_add=1,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # add static mapping for server
+ self.nat44_add_static_mapping(server.ip4, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.tcp)
+
+ # send packet from host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.nat_addr) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, server.ip4)
+ self.assertNotEqual(tcp.sport, host_in_port)
+ self.assertEqual(tcp.dport, server_in_port)
+ self.assert_packet_checksums_valid(p)
+ host_out_port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # send reply from server to host
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.nat_addr) /
+ TCP(sport=server_in_port, dport=host_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, host.ip4)
+ self.assertEqual(tcp.sport, server_out_port)
+ self.assertEqual(tcp.dport, host_in_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_one_armed_nat44(self):
+ """ NAT44EI One armed NAT """
+ remote_host = self.pg9.remote_hosts[0]
+ local_host = self.pg9.remote_hosts[1]
+ external_port = 0
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg9.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg9.sw_if_index,
+ flags=flags, is_add=1)
+
+ # in2out
+ p = (Ether(src=self.pg9.remote_mac, dst=self.pg9.local_mac) /
+ IP(src=local_host.ip4, dst=remote_host.ip4) /
+ TCP(sport=12345, dport=80))
+ self.pg9.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg9.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, remote_host.ip4)
+ self.assertNotEqual(tcp.sport, 12345)
+ external_port = tcp.sport
+ self.assertEqual(tcp.dport, 80)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # out2in
+ p = (Ether(src=self.pg9.remote_mac, dst=self.pg9.local_mac) /
+ IP(src=remote_host.ip4, dst=self.nat_addr) /
+ TCP(sport=80, dport=external_port))
+ self.pg9.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg9.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, remote_host.ip4)
+ self.assertEqual(ip.dst, local_host.ip4)
+ self.assertEqual(tcp.sport, 80)
+ self.assertEqual(tcp.dport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ if self.vpp_worker_count > 1:
+ node = "nat44-ei-handoff-classify"
+ else:
+ node = "nat44-ei-classify"
+
+ err = self.statistics.get_err_counter('/err/%s/next in2out' % node)
+ self.assertEqual(err, 1)
+ err = self.statistics.get_err_counter('/err/%s/next out2in' % node)
+ self.assertEqual(err, 1)
+
+ def test_del_session(self):
+ """ NAT44EI delete session """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(len(pkts))
+
+ sessions = self.vapi.nat44_ei_user_session_dump(self.pg0.remote_ip4, 0)
+ nsessions = len(sessions)
+
+ self.vapi.nat44_ei_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=self.config_flags.NAT44_EI_IF_INSIDE)
+
+ self.vapi.nat44_ei_del_session(
+ address=sessions[1].outside_ip_address,
+ port=sessions[1].outside_port,
+ protocol=sessions[1].protocol)
+
+ sessions = self.vapi.nat44_ei_user_session_dump(self.pg0.remote_ip4, 0)
+ self.assertEqual(nsessions - len(sessions), 2)
+
+ self.vapi.nat44_ei_del_session(
+ address=sessions[0].inside_ip_address,
+ port=sessions[0].inside_port,
+ protocol=sessions[0].protocol,
+ flags=self.config_flags.NAT44_EI_IF_INSIDE)
+
+ self.verify_no_nat44_user()
+
+ def test_frag_in_order(self):
+ """ NAT44EI translate fragments arriving in order """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ self.frag_in_order(proto=IP_PROTOS.tcp)
+ self.frag_in_order(proto=IP_PROTOS.udp)
+ self.frag_in_order(proto=IP_PROTOS.icmp)
+
+ def test_frag_forwarding(self):
+ """ NAT44EI forwarding fragment test """
+ self.vapi.nat44_ei_add_del_interface_addr(
+ is_add=1,
+ sw_if_index=self.pg1.sw_if_index)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_forwarding_enable_disable(enable=1)
+
+ data = b"A" * 16 + b"B" * 16 + b"C" * 3
+ pkts = self.create_stream_frag(self.pg1,
+ self.pg0.remote_ip4,
+ 4789,
+ 4789,
+ data,
+ proto=IP_PROTOS.udp)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.pg1.remote_ip4,
+ self.pg0.remote_ip4)
+ self.assertEqual(p[UDP].sport, 4789)
+ self.assertEqual(p[UDP].dport, 4789)
+ self.assertEqual(data, p[Raw].load)
+
+ def test_reass_hairpinning(self):
+ """ NAT44EI fragments hairpinning """
+
+ server_addr = self.pg0.remote_hosts[1].ip4
+ host_in_port = random.randint(1025, 65535)
+ server_in_port = random.randint(1025, 65535)
+ server_out_port = random.randint(1025, 65535)
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ # add static mapping for server
+ self.nat44_add_static_mapping(server_addr, self.nat_addr,
+ server_in_port,
+ server_out_port,
+ proto=IP_PROTOS.tcp)
+ self.nat44_add_static_mapping(server_addr, self.nat_addr,
+ server_in_port,
+ server_out_port,
+ proto=IP_PROTOS.udp)
+ self.nat44_add_static_mapping(server_addr, self.nat_addr)
+
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.tcp)
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.udp)
+ self.reass_hairpinning(server_addr, server_in_port, server_out_port,
+ host_in_port, proto=IP_PROTOS.icmp)
+
+ def test_frag_out_of_order(self):
+ """ NAT44EI translate fragments arriving out of order """
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ self.frag_out_of_order(proto=IP_PROTOS.tcp)
+ self.frag_out_of_order(proto=IP_PROTOS.udp)
+ self.frag_out_of_order(proto=IP_PROTOS.icmp)
+
+ def test_port_restricted(self):
+ """ NAT44EI Port restricted NAT44EI (MAP-E CE) """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_set_addr_and_port_alloc_alg(alg=1,
+ psid_offset=6,
+ psid_length=6,
+ psid=10)
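+        # with the MAP-E style allocation algorithm (alg=1), the allocated
+        # outside port is expected to embed the PSID; with psid_offset=6 and
+        # psid_length=6 this means ((sport >> 6) & 0x3f) == 10, which the
+        # assertion on tcp.sport below verifies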
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=4567, dport=22))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.dst, self.pg1.remote_ip4)
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(tcp.dport, 22)
+ self.assertNotEqual(tcp.sport, 4567)
+ self.assertEqual((tcp.sport >> 6) & 63, 10)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_port_range(self):
+ """ NAT44EI External address port range """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_set_addr_and_port_alloc_alg(alg=2,
+ start_port=1025,
+ end_port=1027)
+
+ pkts = []
+ for port in range(0, 5):
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1125 + port))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(3)
+ for p in capture:
+ tcp = p[TCP]
+ self.assertGreaterEqual(tcp.sport, 1025)
+ self.assertLessEqual(tcp.sport, 1027)
+
+ def test_multiple_outside_vrf(self):
+ """ NAT44EI Multiple outside VRF """
+ vrf_id1 = 1
+ vrf_id2 = 2
+
+ self.pg1.unconfig_ip4()
+ self.pg2.unconfig_ip4()
+ self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id1})
+ self.vapi.ip_table_add_del(is_add=1, table={'table_id': vrf_id2})
+ self.pg1.set_table_ip4(vrf_id1)
+ self.pg2.set_table_ip4(vrf_id2)
+ self.pg1.config_ip4()
+ self.pg2.config_ip4()
+ self.pg1.resolve_arp()
+ self.pg2.resolve_arp()
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg2.sw_if_index,
+ is_add=1)
+
+ try:
+ # first VRF
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, self.nat_addr)
+
+ pkts = self.create_stream_out(self.pg1, self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ self.tcp_port_in = 60303
+ self.udp_port_in = 60304
+ self.icmp_id_in = 60305
+
+ # second VRF
+ pkts = self.create_stream_in(self.pg0, self.pg2)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_out(capture, self.nat_addr)
+
+ pkts = self.create_stream_out(self.pg2, self.nat_addr)
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+ finally:
+ self.nat44_add_address(self.nat_addr, is_add=0)
+ self.pg1.unconfig_ip4()
+ self.pg2.unconfig_ip4()
+ self.pg1.set_table_ip4(0)
+ self.pg2.set_table_ip4(0)
+ self.pg1.config_ip4()
+ self.pg2.config_ip4()
+ self.pg1.resolve_arp()
+ self.pg2.resolve_arp()
+
+ def test_mss_clamping(self):
+ """ NAT44EI TCP MSS clamping """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port,
+ flags="S", options=[('MSS', 1400)]))
+
+ self.vapi.nat44_ei_set_mss_clamping(enable=1, mss_value=1000)
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ # Negotiated MSS value greater than configured - changed
+ self.verify_mss_value(capture[0], 1000)
+
+ self.vapi.nat44_ei_set_mss_clamping(enable=0, mss_value=1500)
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ # MSS clamping disabled - negotiated MSS unchanged
+ self.verify_mss_value(capture[0], 1400)
+
+ self.vapi.nat44_ei_set_mss_clamping(enable=1, mss_value=1500)
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ # Negotiated MSS value smaller than configured - unchanged
+ self.verify_mss_value(capture[0], 1400)
+
+ def test_ha_send(self):
+ """ NAT44EI Send HA session synchronization events (active) """
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.nat44_add_address(self.nat_addr)
+
+ self.vapi.nat44_ei_ha_set_listener(
+ ip_address=self.pg3.local_ip4, port=12345, path_mtu=512)
+ self.vapi.nat44_ei_ha_set_failover(
+ ip_address=self.pg3.remote_ip4, port=12346,
+ session_refresh_interval=10)
+ bind_layers(UDP, HANATStateSync, sport=12345)
+
+ # create sessions
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture)
+ # active send HA events
+ self.vapi.nat44_ei_ha_flush()
+ stats = self.statistics['/nat44-ei/ha/add-event-send']
+ self.assertEqual(stats[:, 0].sum(), 3)
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ self.assert_packet_checksums_valid(p)
+ try:
+ ip = p[IP]
+ udp = p[UDP]
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(ip.src, self.pg3.local_ip4)
+ self.assertEqual(ip.dst, self.pg3.remote_ip4)
+ self.assertEqual(udp.sport, 12345)
+ self.assertEqual(udp.dport, 12346)
+ self.assertEqual(hanat.version, 1)
+ # self.assertEqual(hanat.thread_index, 0)
+ self.assertEqual(hanat.count, 3)
+ seq = hanat.sequence_number
+ for event in hanat.events:
+ self.assertEqual(event.event_type, 1)
+ self.assertEqual(event.in_addr, self.pg0.remote_ip4)
+ self.assertEqual(event.out_addr, self.nat_addr)
+ self.assertEqual(event.fib_index, 0)
+
+ # ACK received events
+ ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=12346, dport=12345) /
+ HANATStateSync(sequence_number=seq, flags='ACK',
+ thread_index=hanat.thread_index))
+ self.pg3.add_stream(ack)
+ self.pg_start()
+ stats = self.statistics['/nat44-ei/ha/ack-recv']
+ self.assertEqual(stats[:, 0].sum(), 1)
+
+ # delete one session
+ self.pg_enable_capture(self.pg_interfaces)
+ self.vapi.nat44_ei_del_session(
+ address=self.pg0.remote_ip4, port=self.tcp_port_in,
+ protocol=IP_PROTOS.tcp, flags=self.config_flags.NAT44_EI_IF_INSIDE)
+ self.vapi.nat44_ei_ha_flush()
+ stats = self.statistics['/nat44-ei/ha/del-event-send']
+ self.assertEqual(stats[:, 0].sum(), 1)
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertGreater(hanat.sequence_number, seq)
+
+ # do not send ACK, active retry send HA event again
+ self.pg_enable_capture(self.pg_interfaces)
+ sleep(12)
+ stats = self.statistics['/nat44-ei/ha/retry-count']
+ self.assertEqual(stats[:, 0].sum(), 3)
+ stats = self.statistics['/nat44-ei/ha/missed-count']
+ self.assertEqual(stats[:, 0].sum(), 1)
+ capture = self.pg3.get_capture(3)
+ for packet in capture:
+ self.assertEqual(packet, p)
+
+ # session counters refresh
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.get_capture(2)
+ self.vapi.nat44_ei_ha_flush()
+ stats = self.statistics['/nat44-ei/ha/refresh-event-send']
+ self.assertEqual(stats[:, 0].sum(), 2)
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ self.assert_packet_checksums_valid(p)
+ try:
+ ip = p[IP]
+ udp = p[UDP]
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(ip.src, self.pg3.local_ip4)
+ self.assertEqual(ip.dst, self.pg3.remote_ip4)
+ self.assertEqual(udp.sport, 12345)
+ self.assertEqual(udp.dport, 12346)
+ self.assertEqual(hanat.version, 1)
+ self.assertEqual(hanat.count, 2)
+ seq = hanat.sequence_number
+ for event in hanat.events:
+ self.assertEqual(event.event_type, 3)
+ self.assertEqual(event.out_addr, self.nat_addr)
+ self.assertEqual(event.fib_index, 0)
+ self.assertEqual(event.total_pkts, 2)
+ self.assertGreater(event.total_bytes, 0)
+
+ stats = self.statistics['/nat44-ei/ha/ack-recv']
+ ack = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=12346, dport=12345) /
+ HANATStateSync(sequence_number=seq, flags='ACK',
+ thread_index=hanat.thread_index))
+ self.pg3.add_stream(ack)
+ self.pg_start()
+ stats = self.statistics['/nat44-ei/ha/ack-recv']
+ self.assertEqual(stats[:, 0].sum(), 2)
+
+ def test_ha_recv(self):
+ """ NAT44EI Receive HA session synchronization events (passive) """
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+ self.vapi.nat44_ei_ha_set_listener(ip_address=self.pg3.local_ip4,
+ port=12345, path_mtu=512)
+ bind_layers(UDP, HANATStateSync, sport=12345)
+
+        # This is a bit tricky - HA dictates the thread index by design,
+        # but once we use HA to create a session, we also want to pass a
+        # packet through that session, so the session must end up on the
+        # correct thread from both directions - in2out (based on the
+        # inside IP address) and out2in (based on the outside port).
+
+ # first choose a thread index which is correct for IP
+ thread_index = get_nat44_ei_in2out_worker_index(self.pg0.remote_ip4,
+ self.vpp_worker_count)
+
+ # now pick a port which is correct for given thread
+ port_per_thread = int((0xffff-1024) / max(1, self.vpp_worker_count))
+ self.tcp_port_out = 1024 + random.randint(1, port_per_thread)
+ self.udp_port_out = 1024 + random.randint(1, port_per_thread)
+ if self.vpp_worker_count > 0:
+ self.tcp_port_out += port_per_thread * (thread_index - 1)
+ self.udp_port_out += port_per_thread * (thread_index - 1)
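+        # worked example (assuming e.g. 2 workers): port_per_thread would
+        # be int((0xffff - 1024) / 2) = 32255, so worker thread 1 draws
+        # its outside port from roughly 1025..33280 and worker thread 2
+        # from roughly 33280..65535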
+
+ # send HA session add events to failover/passive
+ p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=12346, dport=12345) /
+ HANATStateSync(sequence_number=1, events=[
+ Event(event_type='add', protocol='tcp',
+ in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
+ in_port=self.tcp_port_in, out_port=self.tcp_port_out,
+ eh_addr=self.pg1.remote_ip4,
+ ehn_addr=self.pg1.remote_ip4,
+ eh_port=self.tcp_external_port,
+ ehn_port=self.tcp_external_port, fib_index=0),
+ Event(event_type='add', protocol='udp',
+ in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
+ in_port=self.udp_port_in, out_port=self.udp_port_out,
+ eh_addr=self.pg1.remote_ip4,
+ ehn_addr=self.pg1.remote_ip4,
+ eh_port=self.udp_external_port,
+ ehn_port=self.udp_external_port, fib_index=0)],
+ thread_index=thread_index))
+
+ self.pg3.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ # receive ACK
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(hanat.sequence_number, 1)
+ self.assertEqual(hanat.flags, 'ACK')
+ self.assertEqual(hanat.version, 1)
+ self.assertEqual(hanat.thread_index, thread_index)
+ stats = self.statistics['/nat44-ei/ha/ack-send']
+ self.assertEqual(stats[:, 0].sum(), 1)
+ stats = self.statistics['/nat44-ei/ha/add-event-recv']
+ self.assertEqual(stats[:, 0].sum(), 2)
+ users = self.statistics['/nat44-ei/total-users']
+ self.assertEqual(users[:, 0].sum(), 1)
+ sessions = self.statistics['/nat44-ei/total-sessions']
+ self.assertEqual(sessions[:, 0].sum(), 2)
+ users = self.vapi.nat44_ei_user_dump()
+ self.assertEqual(len(users), 1)
+ self.assertEqual(str(users[0].ip_address),
+ self.pg0.remote_ip4)
+ # there should be 2 sessions created by HA
+ sessions = self.vapi.nat44_ei_user_session_dump(
+ users[0].ip_address, users[0].vrf_id)
+ self.assertEqual(len(sessions), 2)
+ for session in sessions:
+ self.assertEqual(str(session.inside_ip_address),
+ self.pg0.remote_ip4)
+ self.assertEqual(str(session.outside_ip_address),
+ self.nat_addr)
+ self.assertIn(session.inside_port,
+ [self.tcp_port_in, self.udp_port_in])
+ self.assertIn(session.outside_port,
+ [self.tcp_port_out, self.udp_port_out])
+ self.assertIn(session.protocol, [IP_PROTOS.tcp, IP_PROTOS.udp])
+
+ # send HA session delete event to failover/passive
+ p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=12346, dport=12345) /
+ HANATStateSync(sequence_number=2, events=[
+ Event(event_type='del', protocol='udp',
+ in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
+ in_port=self.udp_port_in, out_port=self.udp_port_out,
+ eh_addr=self.pg1.remote_ip4,
+ ehn_addr=self.pg1.remote_ip4,
+ eh_port=self.udp_external_port,
+ ehn_port=self.udp_external_port, fib_index=0)],
+ thread_index=thread_index))
+
+ self.pg3.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ # receive ACK
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(hanat.sequence_number, 2)
+ self.assertEqual(hanat.flags, 'ACK')
+ self.assertEqual(hanat.version, 1)
+ users = self.vapi.nat44_ei_user_dump()
+ self.assertEqual(len(users), 1)
+ self.assertEqual(str(users[0].ip_address),
+ self.pg0.remote_ip4)
+ # now we should have only 1 session, 1 deleted by HA
+ sessions = self.vapi.nat44_ei_user_session_dump(users[0].ip_address,
+ users[0].vrf_id)
+ self.assertEqual(len(sessions), 1)
+ stats = self.statistics['/nat44-ei/ha/del-event-recv']
+ self.assertEqual(stats[:, 0].sum(), 1)
+
+ stats = self.statistics.get_err_counter(
+ '/err/nat44-ei-ha/pkts-processed')
+ self.assertEqual(stats, 2)
+
+ # send HA session refresh event to failover/passive
+ p = (Ether(dst=self.pg3.local_mac, src=self.pg3.remote_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.pg3.local_ip4) /
+ UDP(sport=12346, dport=12345) /
+ HANATStateSync(sequence_number=3, events=[
+ Event(event_type='refresh', protocol='tcp',
+ in_addr=self.pg0.remote_ip4, out_addr=self.nat_addr,
+ in_port=self.tcp_port_in, out_port=self.tcp_port_out,
+ eh_addr=self.pg1.remote_ip4,
+ ehn_addr=self.pg1.remote_ip4,
+ eh_port=self.tcp_external_port,
+ ehn_port=self.tcp_external_port, fib_index=0,
+ total_bytes=1024, total_pkts=2)],
+ thread_index=thread_index))
+ self.pg3.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ # receive ACK
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ hanat = p[HANATStateSync]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(hanat.sequence_number, 3)
+ self.assertEqual(hanat.flags, 'ACK')
+ self.assertEqual(hanat.version, 1)
+ users = self.vapi.nat44_ei_user_dump()
+ self.assertEqual(len(users), 1)
+ self.assertEqual(str(users[0].ip_address),
+ self.pg0.remote_ip4)
+ sessions = self.vapi.nat44_ei_user_session_dump(
+ users[0].ip_address, users[0].vrf_id)
+ self.assertEqual(len(sessions), 1)
+ session = sessions[0]
+ self.assertEqual(session.total_bytes, 1024)
+ self.assertEqual(session.total_pkts, 2)
+ stats = self.statistics['/nat44-ei/ha/refresh-event-recv']
+ self.assertEqual(stats[:, 0].sum(), 1)
+
+ stats = self.statistics.get_err_counter(
+ '/err/nat44-ei-ha/pkts-processed')
+ self.assertEqual(stats, 3)
+
+ # send packet to test session created by HA
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=self.tcp_external_port, dport=self.tcp_port_out))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ except IndexError:
+ self.logger.error(ppp("Invalid packet:", p))
+ raise
+ else:
+ self.assertEqual(ip.src, self.pg1.remote_ip4)
+ self.assertEqual(ip.dst, self.pg0.remote_ip4)
+ self.assertEqual(tcp.sport, self.tcp_external_port)
+ self.assertEqual(tcp.dport, self.tcp_port_in)
+
+ def reconfigure_frame_queue_nelts(self, frame_queue_nelts):
+ self.vapi.nat44_ei_plugin_enable_disable(enable=0)
+ self.vapi.nat44_ei_set_fq_options(frame_queue_nelts=frame_queue_nelts)
+ # keep plugin configuration persistent
+ self.plugin_enable()
+ return self.vapi.nat44_ei_show_fq_options().frame_queue_nelts
+
+ def test_set_frame_queue_nelts(self):
+ """ NAT44 EI API test - worker handoff frame queue elements """
+ self.assertEqual(self.reconfigure_frame_queue_nelts(512), 512)
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show nat44 ei timeouts"))
+ self.logger.info(self.vapi.cli("show nat44 ei addresses"))
+ self.logger.info(self.vapi.cli("show nat44 ei interfaces"))
+ self.logger.info(self.vapi.cli("show nat44 ei static mappings"))
+ self.logger.info(self.vapi.cli("show nat44 ei interface address"))
+ self.logger.info(self.vapi.cli("show nat44 ei sessions detail"))
+ self.logger.info(self.vapi.cli("show nat44 ei hash tables detail"))
+ self.logger.info(self.vapi.cli("show nat44 ei ha"))
+ self.logger.info(
+ self.vapi.cli("show nat44 ei addr-port-assignment-alg"))
+
+ def test_outside_address_distribution(self):
+ """ Outside address distribution based on source address """
+
+ x = 100
+ nat_addresses = []
+
+ for i in range(1, x):
+ a = "10.0.0.%d" % i
+ nat_addresses.append(a)
+
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ self.vapi.nat44_ei_add_del_address_range(
+ first_ip_address=nat_addresses[0],
+ last_ip_address=nat_addresses[-1],
+ vrf_id=0xFFFFFFFF, is_add=1)
+
+ self.pg0.generate_remote_hosts(x)
+
+ pkts = []
+ for i in range(x):
+ info = self.create_packet_info(self.pg0, self.pg1)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_hosts[i].ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=7000+i, dport=8000+i) /
+ Raw(payload))
+ info.data = p
+ pkts.append(p)
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ recvd = self.pg1.get_capture(len(pkts))
+ for p_recvd in recvd:
+ payload_info = self.payload_to_info(p_recvd[Raw])
+ packet_index = payload_info.index
+ info = self._packet_infos[packet_index]
+ self.assertTrue(info is not None)
+ self.assertEqual(packet_index, info.index)
+ p_sent = info.data
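+            # the expected outside address mirrors the plugin's source
+            # based distribution: a value derived from the sent source
+            # address selects an index into the pool modulo its size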
+ packed = socket.inet_aton(p_sent[IP].src)
+ numeric = struct.unpack("!L", packed)[0]
+ numeric = socket.htonl(numeric)
+ a = nat_addresses[(numeric-1) % len(nat_addresses)]
+ self.assertEqual(
+ a, p_recvd[IP].src,
+ "Invalid packet (src IP %s translated to %s, but expected %s)"
+ % (p_sent[IP].src, p_recvd[IP].src, a))
+
+
+class TestNAT44Out2InDPO(MethodHolder):
+ """ NAT44EI Test Cases using out2in DPO """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNAT44Out2InDPO, cls).setUpClass()
+ cls.vapi.cli("set log class nat44-ei level debug")
+
+ cls.tcp_port_in = 6303
+ cls.tcp_port_out = 6303
+ cls.udp_port_in = 6304
+ cls.udp_port_out = 6304
+ cls.icmp_id_in = 6305
+ cls.icmp_id_out = 6305
+ cls.nat_addr = '10.0.0.3'
+ cls.dst_ip4 = '192.168.70.1'
+
+ cls.create_pg_interfaces(range(2))
+
+ cls.pg0.admin_up()
+ cls.pg0.config_ip4()
+ cls.pg0.resolve_arp()
+
+ cls.pg1.admin_up()
+ cls.pg1.config_ip6()
+ cls.pg1.resolve_ndp()
+
+ r1 = VppIpRoute(cls, "::", 0,
+ [VppRoutePath(cls.pg1.remote_ip6,
+ cls.pg1.sw_if_index)],
+ register=False)
+ r1.add_vpp_config()
+
+ def setUp(self):
+ super(TestNAT44Out2InDPO, self).setUp()
+ flags = self.config_flags.NAT44_EI_OUT2IN_DPO
+ self.vapi.nat44_ei_plugin_enable_disable(enable=1, flags=flags)
+
+ def tearDown(self):
+ super(TestNAT44Out2InDPO, self).tearDown()
+ if not self.vpp_dead:
+ self.vapi.nat44_ei_plugin_enable_disable(enable=0)
+ self.vapi.cli("clear logging")
+
+ def configure_xlat(self):
+ self.dst_ip6_pfx = '1:2:3::'
+ self.dst_ip6_pfx_n = socket.inet_pton(socket.AF_INET6,
+ self.dst_ip6_pfx)
+ self.dst_ip6_pfx_len = 96
+ self.src_ip6_pfx = '4:5:6::'
+ self.src_ip6_pfx_n = socket.inet_pton(socket.AF_INET6,
+ self.src_ip6_pfx)
+ self.src_ip6_pfx_len = 96
+ self.vapi.map_add_domain(self.dst_ip6_pfx_n, self.dst_ip6_pfx_len,
+ self.src_ip6_pfx_n, self.src_ip6_pfx_len,
+ '\x00\x00\x00\x00', 0)
+
+    @unittest.skip('Temporarily disabled')
+ def test_464xlat_ce(self):
+ """ Test 464XLAT CE with NAT44EI """
+
+ self.configure_xlat()
+
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_add_del_address_range(
+ first_ip_address=self.nat_addr_n,
+ last_ip_address=self.nat_addr_n,
+ vrf_id=0xFFFFFFFF, is_add=1)
+
+ out_src_ip6 = self.compose_ip6(self.dst_ip4, self.dst_ip6_pfx,
+ self.dst_ip6_pfx_len)
+ out_dst_ip6 = self.compose_ip6(self.nat_addr, self.src_ip6_pfx,
+ self.src_ip6_pfx_len)
+
+ try:
+ pkts = self.create_stream_in(self.pg0, self.pg1, self.dst_ip4)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out_ip6(capture, nat_ip=out_dst_ip6,
+ dst_ip=out_src_ip6)
+
+ pkts = self.create_stream_out_ip6(self.pg1, out_src_ip6,
+ out_dst_ip6)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+ finally:
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags)
+ self.vapi.nat44_ei_add_del_address_range(
+ first_ip_address=self.nat_addr_n,
+ last_ip_address=self.nat_addr_n,
+ vrf_id=0xFFFFFFFF)
+
+ @unittest.skip('Temporarily disabled')
+ def test_464xlat_ce_no_nat(self):
+ """ Test 464XLAT CE without NAT44EI """
+
+ self.configure_xlat()
+
+ out_src_ip6 = self.compose_ip6(self.dst_ip4, self.dst_ip6_pfx,
+ self.dst_ip6_pfx_len)
+ out_dst_ip6 = self.compose_ip6(self.pg0.remote_ip4, self.src_ip6_pfx,
+ self.src_ip6_pfx_len)
+
+ pkts = self.create_stream_in(self.pg0, self.pg1, self.dst_ip4)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out_ip6(capture, dst_ip=out_src_ip6,
+ nat_ip=out_dst_ip6, same_port=True)
+
+ pkts = self.create_stream_out_ip6(self.pg1, out_src_ip6, out_dst_ip6)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in(capture, self.pg0)
+
+
+class TestNAT44EIMW(MethodHolder):
+ """ NAT44EI Test Cases (multiple workers) """
+ vpp_worker_count = 2
+ max_translations = 10240
+ max_users = 10240
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNAT44EIMW, cls).setUpClass()
+ cls.vapi.cli("set log class nat level debug")
+
+ cls.tcp_port_in = 6303
+ cls.tcp_port_out = 6303
+ cls.udp_port_in = 6304
+ cls.udp_port_out = 6304
+ cls.icmp_id_in = 6305
+ cls.icmp_id_out = 6305
+ cls.nat_addr = '10.0.0.3'
+ cls.ipfix_src_port = 4739
+ cls.ipfix_domain_id = 1
+ cls.tcp_external_port = 80
+ cls.udp_external_port = 69
+
+ cls.create_pg_interfaces(range(10))
+ cls.interfaces = list(cls.pg_interfaces[0:4])
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.pg0.generate_remote_hosts(3)
+ cls.pg0.configure_ipv4_neighbors()
+
+ cls.pg1.generate_remote_hosts(1)
+ cls.pg1.configure_ipv4_neighbors()
+
+ cls.overlapping_interfaces = list(cls.pg_interfaces[4:7])
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 10})
+ cls.vapi.ip_table_add_del(is_add=1, table={'table_id': 20})
+
+ cls.pg4._local_ip4 = "172.16.255.1"
+ cls.pg4._remote_hosts[0]._ip4 = "172.16.255.2"
+ cls.pg4.set_table_ip4(10)
+ cls.pg5._local_ip4 = "172.17.255.3"
+ cls.pg5._remote_hosts[0]._ip4 = "172.17.255.4"
+ cls.pg5.set_table_ip4(10)
+ cls.pg6._local_ip4 = "172.16.255.1"
+ cls.pg6._remote_hosts[0]._ip4 = "172.16.255.2"
+ cls.pg6.set_table_ip4(20)
+ for i in cls.overlapping_interfaces:
+ i.config_ip4()
+ i.admin_up()
+ i.resolve_arp()
+
+ cls.pg7.admin_up()
+ cls.pg8.admin_up()
+
+ cls.pg9.generate_remote_hosts(2)
+ cls.pg9.config_ip4()
+ cls.vapi.sw_interface_add_del_address(
+ sw_if_index=cls.pg9.sw_if_index,
+ prefix="10.0.0.1/24")
+
+ cls.pg9.admin_up()
+ cls.pg9.resolve_arp()
+ cls.pg9._remote_hosts[1]._ip4 = cls.pg9._remote_hosts[0]._ip4
+ cls.pg4._remote_ip4 = cls.pg9._remote_hosts[0]._ip4 = "10.0.0.2"
+ cls.pg9.resolve_arp()
+
+ def setUp(self):
+ super(TestNAT44EIMW, self).setUp()
+ self.vapi.nat44_ei_plugin_enable_disable(
+ sessions=self.max_translations,
+ users=self.max_users, enable=1)
+
+ def tearDown(self):
+ super(TestNAT44EIMW, self).tearDown()
+ if not self.vpp_dead:
+ self.vapi.nat44_ei_ipfix_enable_disable(
+ domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=0)
+ self.ipfix_src_port = 4739
+ self.ipfix_domain_id = 1
+
+ self.vapi.nat44_ei_plugin_enable_disable(enable=0)
+ self.vapi.cli("clear logging")
+
+ def test_hairpinning(self):
+ """ NAT44EI hairpinning - 1:1 NAPT """
+
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ host_out_port = 0
+ server_in_port = 5678
+ server_out_port = 8765
+ worker_1 = 1
+ worker_2 = 2
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # add static mapping for server
+ self.nat44_add_static_mapping(server.ip4, self.nat_addr,
+ server_in_port, server_out_port,
+ proto=IP_PROTOS.tcp)
+
+ cnt = self.statistics['/nat44-ei/hairpinning']
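+ # the hairpinning counter in the stats segment is indexed per worker
+ # thread and per sw_if_index, hence the [worker][if_idx] lookups in the
+ # delta checks below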
+ # send packet from host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.nat_addr) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, server.ip4)
+ self.assertNotEqual(tcp.sport, host_in_port)
+ self.assertEqual(tcp.dport, server_in_port)
+ self.assert_packet_checksums_valid(p)
+ host_out_port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[worker_2][if_idx] - cnt[worker_1][if_idx], 1)
+
+ # send reply from server to host
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.nat_addr) /
+ TCP(sport=server_in_port, dport=host_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, host.ip4)
+ self.assertEqual(tcp.sport, server_out_port)
+ self.assertEqual(tcp.dport, host_in_port)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ after = self.statistics['/nat44-ei/hairpinning']
+ if_idx = self.pg0.sw_if_index
+ self.assertEqual(after[worker_1][if_idx] - cnt[worker_1][if_idx], 1)
+ self.assertEqual(after[worker_2][if_idx] - cnt[worker_2][if_idx], 2)
+
+ def test_hairpinning2(self):
+ """ NAT44EI hairpinning - 1:1 NAT"""
+
+ server1_nat_ip = "10.0.0.10"
+ server2_nat_ip = "10.0.0.11"
+ host = self.pg0.remote_hosts[0]
+ server1 = self.pg0.remote_hosts[1]
+ server2 = self.pg0.remote_hosts[2]
+ server_tcp_port = 22
+ server_udp_port = 20
+
+ self.nat44_add_address(self.nat_addr)
+ flags = self.config_flags.NAT44_EI_IF_INSIDE
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg0.sw_if_index,
+ flags=flags, is_add=1)
+ self.vapi.nat44_ei_interface_add_del_feature(
+ sw_if_index=self.pg1.sw_if_index,
+ is_add=1)
+
+ # add static mapping for servers
+ self.nat44_add_static_mapping(server1.ip4, server1_nat_ip)
+ self.nat44_add_static_mapping(server2.ip4, server2_nat_ip)
+
+ # host to server1
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ TCP(sport=self.tcp_port_in, dport=server_tcp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ UDP(sport=self.udp_port_in, dport=server_udp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=host.ip4, dst=server1_nat_ip) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, server1.ip4)
+ if packet.haslayer(TCP):
+ self.assertNotEqual(packet[TCP].sport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].dport, server_tcp_port)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertNotEqual(packet[UDP].sport, self.udp_port_in)
+ self.assertEqual(packet[UDP].dport, server_udp_port)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ self.assertNotEqual(packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server1 to host
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ TCP(sport=server_tcp_port, dport=self.tcp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ UDP(sport=server_udp_port, dport=self.udp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=self.nat_addr) /
+ ICMP(id=self.icmp_id_out, type='echo-reply'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server1_nat_ip)
+ self.assertEqual(packet[IP].dst, host.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].sport, server_tcp_port)
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ self.assertEqual(packet[UDP].sport, server_udp_port)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server2 to server1
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ TCP(sport=self.tcp_port_in, dport=server_tcp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ UDP(sport=self.udp_port_in, dport=server_udp_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server2.ip4, dst=server1_nat_ip) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server2_nat_ip)
+ self.assertEqual(packet[IP].dst, server1.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].sport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].dport, server_tcp_port)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].sport, self.udp_port_in)
+ self.assertEqual(packet[UDP].dport, server_udp_port)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server1 to server2
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ TCP(sport=server_tcp_port, dport=self.tcp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ UDP(sport=server_udp_port, dport=self.udp_port_out))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=server1.ip4, dst=server2_nat_ip) /
+ ICMP(id=self.icmp_id_out, type='echo-reply'))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, server1_nat_ip)
+ self.assertEqual(packet[IP].dst, server2.ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ self.assertEqual(packet[TCP].sport, server_tcp_port)
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ self.assertEqual(packet[UDP].sport, server_udp_port)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_nat64.py b/test/test_nat64.py
new file mode 100644
index 00000000000..9a10b9fc380
--- /dev/null
+++ b/test/test_nat64.py
@@ -0,0 +1,1937 @@
+#!/usr/bin/env python3
+
+import ipaddress
+import random
+import socket
+import struct
+import unittest
+from io import BytesIO
+from time import sleep
+
+import scapy.compat
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from scapy.data import IP_PROTOS
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
+from scapy.layers.inet6 import ICMPv6DestUnreach, IPerror6, IPv6ExtHdrFragment
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply, \
+ ICMPv6ND_NS, ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, fragment6
+from scapy.layers.l2 import Ether, GRE
+from scapy.packet import Raw
+from syslog_rfc5424_parser import SyslogMessage, ParseError
+from syslog_rfc5424_parser.constants import SyslogSeverity
+from util import ppc, ppp
+from vpp_papi import VppEnum
+
+
+@tag_fixme_vpp_workers
+class TestNAT64(VppTestCase):
+ """ NAT64 Test Cases """
+
+ @property
+ def SYSLOG_SEVERITY(self):
+ return VppEnum.vl_api_syslog_severity_t
+
+ @property
+ def config_flags(self):
+ return VppEnum.vl_api_nat_config_flags_t
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNAT64, cls).setUpClass()
+
+ cls.tcp_port_in = 6303
+ cls.tcp_port_out = 6303
+ cls.udp_port_in = 6304
+ cls.udp_port_out = 6304
+ cls.icmp_id_in = 6305
+ cls.icmp_id_out = 6305
+ cls.tcp_external_port = 80
+ cls.nat_addr = '10.0.0.3'
+ cls.nat_addr_n = socket.inet_pton(socket.AF_INET, cls.nat_addr)
+ cls.vrf1_id = 10
+ cls.vrf1_nat_addr = '10.0.10.3'
+ cls.ipfix_src_port = 4739
+ cls.ipfix_domain_id = 1
+
+ cls.create_pg_interfaces(range(6))
+ cls.ip6_interfaces = list(cls.pg_interfaces[0:1])
+ cls.ip6_interfaces.append(cls.pg_interfaces[2])
+ cls.ip4_interfaces = list(cls.pg_interfaces[1:2])
+
+ cls.vapi.ip_table_add_del(is_add=1,
+ table={'table_id': cls.vrf1_id,
+ 'is_ip6': 1})
+
+ cls.pg_interfaces[2].set_table_ip6(cls.vrf1_id)
+
+ cls.pg0.generate_remote_hosts(2)
+
+ for i in cls.ip6_interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.configure_ipv6_neighbors()
+
+ for i in cls.ip4_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.pg3.admin_up()
+ cls.pg3.config_ip4()
+ cls.pg3.resolve_arp()
+ cls.pg3.config_ip6()
+ cls.pg3.configure_ipv6_neighbors()
+
+ cls.pg5.admin_up()
+ cls.pg5.config_ip6()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestNAT64, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestNAT64, self).setUp()
+ self.vapi.nat64_plugin_enable_disable(enable=1,
+ bib_buckets=128, st_buckets=256)
+
+ def tearDown(self):
+ super(TestNAT64, self).tearDown()
+ if not self.vpp_dead:
+ self.vapi.nat64_plugin_enable_disable(enable=0)
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show nat64 pool"))
+ self.logger.info(self.vapi.cli("show nat64 interfaces"))
+ self.logger.info(self.vapi.cli("show nat64 prefix"))
+ self.logger.info(self.vapi.cli("show nat64 bib all"))
+ self.logger.info(self.vapi.cli("show nat64 session table all"))
+
+ def create_stream_in_ip6(self, in_if, out_if, hlim=64, pref=None, plen=0):
+ """
+ Create IPv6 packet stream for inside network
+
+ :param in_if: Inside interface
+ :param out_if: Outside interface
+ :param hlim: Hop Limit of generated packets
+ :param pref: NAT64 prefix
+ :param plen: NAT64 prefix length
+ """
+ pkts = []
+ if pref is None:
+ dst = ''.join(['64:ff9b::', out_if.remote_ip4])
+ else:
+ dst = self.compose_ip6(out_if.remote_ip4, pref, plen)
+
+ # TCP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IPv6(src=in_if.remote_ip6, dst=dst, hlim=hlim) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IPv6(src=in_if.remote_ip6, dst=dst, hlim=hlim) /
+ UDP(sport=self.udp_port_in, dport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IPv6(src=in_if.remote_ip6, dst=dst, hlim=hlim) /
+ ICMPv6EchoRequest(id=self.icmp_id_in))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_out(self, out_if, dst_ip=None, ttl=64,
+ use_inside_ports=False):
+ """
+ Create packet stream for outside network
+
+ :param out_if: Outside interface
+ :param dst_ip: Destination IP address (Default use global NAT address)
+ :param ttl: TTL of generated packets
+ :param use_inside_ports: Use inside NAT ports as destination ports
+ instead of outside ports
+ """
+ if dst_ip is None:
+ dst_ip = self.nat_addr
+ if not use_inside_ports:
+ tcp_port = self.tcp_port_out
+ udp_port = self.udp_port_out
+ icmp_id = self.icmp_id_out
+ else:
+ tcp_port = self.tcp_port_in
+ udp_port = self.udp_port_in
+ icmp_id = self.icmp_id_in
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ TCP(dport=tcp_port, sport=20))
+ pkts.extend([p, p])
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ UDP(dport=udp_port, sport=20))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip, ttl=ttl) /
+ ICMP(id=icmp_id, type='echo-reply'))
+ pkts.append(p)
+
+ return pkts
+
+ def verify_capture_out(self, capture, nat_ip=None, same_port=False,
+ dst_ip=None, is_ip6=False, ignore_port=False):
+ """
+ Verify captured packets on outside network
+
+ :param capture: Captured packets
+ :param nat_ip: Translated IP address (Default use global NAT address)
+ :param same_port: Source port number is not translated (Default False)
+ :param dst_ip: Destination IP address (Default do not verify)
+ :param is_ip6: If L3 protocol is IPv6 (Default False)
+ :param ignore_port: Do not verify translated port numbers (Default False)
+ """
+ if is_ip6:
+ IP46 = IPv6
+ ICMP46 = ICMPv6EchoRequest
+ else:
+ IP46 = IP
+ ICMP46 = ICMP
+ if nat_ip is None:
+ nat_ip = self.nat_addr
+ for packet in capture:
+ try:
+ if not is_ip6:
+ self.assert_packet_checksums_valid(packet)
+ self.assertEqual(packet[IP46].src, nat_ip)
+ if dst_ip is not None:
+ self.assertEqual(packet[IP46].dst, dst_ip)
+ if packet.haslayer(TCP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = packet[TCP].sport
+ self.assert_packet_checksums_valid(packet)
+ elif packet.haslayer(UDP):
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[UDP].sport, self.udp_port_in)
+ else:
+ self.assertNotEqual(
+ packet[UDP].sport, self.udp_port_in)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ if not ignore_port:
+ if same_port:
+ self.assertEqual(
+ packet[ICMP46].id, self.icmp_id_in)
+ else:
+ self.assertNotEqual(
+ packet[ICMP46].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP46].id
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def verify_capture_in_ip6(self, capture, src_ip, dst_ip):
+ """
+ Verify captured IPv6 packets on inside network
+
+ :param capture: Captured packets
+ :param src_ip: Source IP
+ :param dst_ip: Destination IP address
+ """
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, src_ip)
+ self.assertEqual(packet[IPv6].dst, dst_ip)
+ self.assert_packet_checksums_valid(packet)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMPv6EchoReply].id,
+ self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def create_stream_frag(self, src_if, dst, sport, dport, data,
+ proto=IP_PROTOS.tcp, echo_reply=False):
+ """
+ Create fragmented packet stream
+
+ :param src_if: Source interface
+ :param dst: Destination IPv4 address
+ :param sport: Source port
+ :param dport: Destination port
+ :param data: Payload data
+ :param proto: protocol (TCP, UDP, ICMP)
+ :param echo_reply: send ICMP echo-reply instead of echo-request when proto is ICMP
+ :returns: Fragments
+ """
+ if proto == IP_PROTOS.tcp:
+ p = (IP(src=src_if.remote_ip4, dst=dst) /
+ TCP(sport=sport, dport=dport) /
+ Raw(data))
+ p = p.__class__(scapy.compat.raw(p))
+ chksum = p[TCP].chksum
+ proto_header = TCP(sport=sport, dport=dport, chksum=chksum)
+ elif proto == IP_PROTOS.udp:
+ proto_header = UDP(sport=sport, dport=dport)
+ elif proto == IP_PROTOS.icmp:
+ if not echo_reply:
+ proto_header = ICMP(id=sport, type='echo-request')
+ else:
+ proto_header = ICMP(id=sport, type='echo-reply')
+ else:
+ raise Exception("Unsupported protocol")
+ id = random.randint(0, 65535)
+ pkts = []
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[0:4])
+ else:
+ raw = Raw(data[0:16])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=0, id=id) /
+ proto_header /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[4:20])
+ else:
+ raw = Raw(data[16:32])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, flags="MF", frag=3, id=id,
+ proto=proto) /
+ raw)
+ pkts.append(p)
+ if proto == IP_PROTOS.tcp:
+ raw = Raw(data[20:])
+ else:
+ raw = Raw(data[32:])
+ p = (Ether(src=src_if.remote_mac, dst=src_if.local_mac) /
+ IP(src=src_if.remote_ip4, dst=dst, frag=5, proto=proto,
+ id=id) /
+ raw)
+ pkts.append(p)
+ return pkts
+
+ def create_stream_frag_ip6(self, src_if, dst, sport, dport, data,
+ pref=None, plen=0, frag_size=128):
+ """
+ Create fragmented packet stream
+
+ :param src_if: Source interface
+ :param dst: Destination IPv4 address
+ :param sport: Source TCP port
+ :param dport: Destination TCP port
+ :param data: Payload data
+ :param pref: NAT64 prefix
+ :param plen: NAT64 prefix length
+ :param frag_size: size of fragments
+ :returns: Fragments
+ """
+ if pref is None:
+ dst_ip6 = ''.join(['64:ff9b::', dst])
+ else:
+ dst_ip6 = self.compose_ip6(dst, pref, plen)
+
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IPv6(src=src_if.remote_ip6, dst=dst_ip6) /
+ IPv6ExtHdrFragment(id=random.randint(0, 65535)) /
+ TCP(sport=sport, dport=dport) /
+ Raw(data))
+
+ return fragment6(p, frag_size)
+
+ def reass_frags_and_verify(self, frags, src, dst):
+ """
+ Reassemble and verify fragmented packet
+
+ :param frags: Captured fragments
+ :param src: Source IPv4 address to verify
+ :param dst: Destination IPv4 address to verify
+
+ :returns: Reassembled IPv4 packet
+ """
+ buffer = BytesIO()
+ for p in frags:
+ self.assertEqual(p[IP].src, src)
+ self.assertEqual(p[IP].dst, dst)
+ self.assert_ip_checksum_valid(p)
+ buffer.seek(p[IP].frag * 8)
+ buffer.write(bytes(p[IP].payload))
+ ip = IP(src=frags[0][IP].src, dst=frags[0][IP].dst,
+ proto=frags[0][IP].proto)
+ if ip.proto == IP_PROTOS.tcp:
+ p = (ip / TCP(buffer.getvalue()))
+ self.logger.debug(ppp("Reassembled:", p))
+ self.assert_tcp_checksum_valid(p)
+ elif ip.proto == IP_PROTOS.udp:
+ p = (ip / UDP(buffer.getvalue()[:8]) /
+ Raw(buffer.getvalue()[8:]))
+ elif ip.proto == IP_PROTOS.icmp:
+ p = (ip / ICMP(buffer.getvalue()))
+ return p
+
+ def reass_frags_and_verify_ip6(self, frags, src, dst):
+ """
+ Reassemble and verify fragmented packet
+
+ :param frags: Captured fragments
+ :param src: Source IPv6 address to verify
+ :param dst: Destination IPv6 address to verify
+
+ :returns: Reassembled IPv6 packet
+ """
+ buffer = BytesIO()
+ for p in frags:
+ self.assertEqual(p[IPv6].src, src)
+ self.assertEqual(p[IPv6].dst, dst)
+ buffer.seek(p[IPv6ExtHdrFragment].offset * 8)
+ buffer.write(bytes(p[IPv6ExtHdrFragment].payload))
+ ip = IPv6(src=frags[0][IPv6].src, dst=frags[0][IPv6].dst,
+ nh=frags[0][IPv6ExtHdrFragment].nh)
+ if ip.nh == IP_PROTOS.tcp:
+ p = (ip / TCP(buffer.getvalue()))
+ elif ip.nh == IP_PROTOS.udp:
+ p = (ip / UDP(buffer.getvalue()))
+ self.logger.debug(ppp("Reassembled:", p))
+ self.assert_packet_checksums_valid(p)
+ return p
+
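+ # In the IPFIX verification helpers below, the numeric keys into each
+ # decoded data record are IANA IPFIX information element IDs (e.g. 230
+ # is natEvent, 466 is natQuotaExceededEvent); the inline comments name
+ # the element checked on each line.
+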
+ def verify_ipfix_max_bibs(self, data, limit):
+ """
+ Verify IPFIX maximum BIB entries exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Maximum number of BIB entries that can be created.
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(scapy.compat.orb(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 2), record[466])
+ # maxBIBEntries
+ self.assertEqual(struct.pack("I", limit), record[472])
+
+ def verify_ipfix_bib(self, data, is_create, src_addr):
+ """
+ Verify IPFIX NAT64 BIB create and delete events
+
+ :param data: Decoded IPFIX data records
+ :param is_create: Create event if nonzero, otherwise delete event
+ :param src_addr: IPv6 source address
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ if is_create:
+ self.assertEqual(scapy.compat.orb(record[230]), 10)
+ else:
+ self.assertEqual(scapy.compat.orb(record[230]), 11)
+ # sourceIPv6Address
+ self.assertEqual(src_addr, str(ipaddress.IPv6Address(record[27])))
+ # postNATSourceIPv4Address
+ self.assertEqual(self.nat_addr_n, record[225])
+ # protocolIdentifier
+ self.assertEqual(IP_PROTOS.tcp, scapy.compat.orb(record[4]))
+ # ingressVRFID
+ self.assertEqual(struct.pack("!I", 0), record[234])
+ # sourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_in), record[7])
+ # postNAPTSourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_out), record[227])
+
+ def verify_ipfix_nat64_ses(self, data, is_create, src_addr, dst_addr,
+ dst_port):
+ """
+ Verify IPFIX NAT64 session create and delete events
+
+ :param data: Decoded IPFIX data records
+ :param is_create: Create event if nonzero, otherwise delete event
+ :param src_addr: IPv6 source address
+ :param dst_addr: IPv4 destination address
+ :param dst_port: destination TCP port
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ if is_create:
+ self.assertEqual(scapy.compat.orb(record[230]), 6)
+ else:
+ self.assertEqual(scapy.compat.orb(record[230]), 7)
+ # sourceIPv6Address
+ self.assertEqual(src_addr, str(ipaddress.IPv6Address(record[27])))
+ # destinationIPv6Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET6,
+ self.compose_ip6(dst_addr,
+ '64:ff9b::',
+ 96)),
+ record[28])
+ # postNATSourceIPv4Address
+ self.assertEqual(self.nat_addr_n, record[225])
+ # postNATDestinationIPv4Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET, dst_addr),
+ record[226])
+ # protocolIdentifier
+ self.assertEqual(IP_PROTOS.tcp, scapy.compat.orb(record[4]))
+ # ingressVRFID
+ self.assertEqual(struct.pack("!I", 0), record[234])
+ # sourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_in), record[7])
+ # postNAPTSourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_out), record[227])
+ # destinationTransportPort
+ self.assertEqual(struct.pack("!H", dst_port), record[11])
+ # postNAPTDestinationTransportPort
+ self.assertEqual(struct.pack("!H", dst_port), record[228])
+
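+ # The structured-data keys checked below follow the NAT syslog session
+ # logging format produced by VPP: ISADDR/ISPORT are the internal
+ # (pre-NAT) source address and port, XSADDR/XSPORT the external
+ # (post-NAT) ones, XDADDR/XDPORT the external destination, IATYP/XATYP
+ # the address families and PROTO the L4 protocol number.
+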
+ def verify_syslog_sess(self, data, is_add=True, is_ip6=False):
+ message = data.decode('utf-8')
+ try:
+ message = SyslogMessage.parse(message)
+ except ParseError as e:
+ self.logger.error(e)
+ raise
+ else:
+ self.assertEqual(message.severity, SyslogSeverity.info)
+ self.assertEqual(message.appname, 'NAT')
+ self.assertEqual(message.msgid, 'SADD' if is_add else 'SDEL')
+ sd_params = message.sd.get('nsess')
+ self.assertTrue(sd_params is not None)
+ if is_ip6:
+ self.assertEqual(sd_params.get('IATYP'), 'IPv6')
+ self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip6)
+ else:
+ self.assertEqual(sd_params.get('IATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('ISADDR'), self.pg0.remote_ip4)
+ self.assertTrue(sd_params.get('SSUBIX') is not None)
+ self.assertEqual(sd_params.get('ISPORT'), "%d" % self.tcp_port_in)
+ self.assertEqual(sd_params.get('XATYP'), 'IPv4')
+ self.assertEqual(sd_params.get('XSADDR'), self.nat_addr)
+ self.assertEqual(sd_params.get('XSPORT'), "%d" % self.tcp_port_out)
+ self.assertEqual(sd_params.get('PROTO'), "%d" % IP_PROTOS.tcp)
+ self.assertEqual(sd_params.get('SVLAN'), '0')
+ self.assertEqual(sd_params.get('XDADDR'), self.pg1.remote_ip4)
+ self.assertEqual(sd_params.get('XDPORT'),
+ "%d" % self.tcp_external_port)
+
+ def compose_ip6(self, ip4, pref, plen):
+ """
+ Compose an IPv4-embedded IPv6 address
+
+ :param ip4: IPv4 address
+ :param pref: IPv6 prefix
+ :param plen: IPv6 prefix length
+ :returns: IPv4-embedded IPv6 address
+ """
+ pref_n = list(socket.inet_pton(socket.AF_INET6, pref))
+ ip4_n = list(socket.inet_pton(socket.AF_INET, ip4))
+ if plen == 32:
+ pref_n[4] = ip4_n[0]
+ pref_n[5] = ip4_n[1]
+ pref_n[6] = ip4_n[2]
+ pref_n[7] = ip4_n[3]
+ elif plen == 40:
+ pref_n[5] = ip4_n[0]
+ pref_n[6] = ip4_n[1]
+ pref_n[7] = ip4_n[2]
+ pref_n[9] = ip4_n[3]
+ elif plen == 48:
+ pref_n[6] = ip4_n[0]
+ pref_n[7] = ip4_n[1]
+ pref_n[9] = ip4_n[2]
+ pref_n[10] = ip4_n[3]
+ elif plen == 56:
+ pref_n[7] = ip4_n[0]
+ pref_n[9] = ip4_n[1]
+ pref_n[10] = ip4_n[2]
+ pref_n[11] = ip4_n[3]
+ elif plen == 64:
+ pref_n[9] = ip4_n[0]
+ pref_n[10] = ip4_n[1]
+ pref_n[11] = ip4_n[2]
+ pref_n[12] = ip4_n[3]
+ elif plen == 96:
+ pref_n[12] = ip4_n[0]
+ pref_n[13] = ip4_n[1]
+ pref_n[14] = ip4_n[2]
+ pref_n[15] = ip4_n[3]
+ packed_pref_n = b''.join([scapy.compat.chb(x) for x in pref_n])
+ return socket.inet_ntop(socket.AF_INET6, packed_pref_n)
+
+ def verify_ipfix_max_sessions(self, data, limit):
+ """
+ Verify IPFIX maximum session entries exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Maximum number of session entries that can be created.
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(scapy.compat.orb(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 1), record[466])
+ # maxSessionEntries
+ self.assertEqual(struct.pack("I", limit), record[471])
+
+ def test_nat64_inside_interface_handles_neighbor_advertisement(self):
+ """ NAT64 inside interface handles Neighbor Advertisement """
+
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg5.sw_if_index)
+
+ # Try to send ping
+ ping = (Ether(dst=self.pg5.local_mac, src=self.pg5.remote_mac) /
+ IPv6(src=self.pg5.remote_ip6, dst=self.pg5.local_ip6) /
+ ICMPv6EchoRequest())
+ pkts = [ping]
+ self.pg5.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Wait for Neighbor Solicitation
+ capture = self.pg5.get_capture(len(pkts))
+ packet = capture[0]
+ try:
+ self.assertEqual(packet[IPv6].src, self.pg5.local_ip6)
+ self.assertEqual(packet.haslayer(ICMPv6ND_NS), 1)
+ tgt = packet[ICMPv6ND_NS].tgt
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # Send Neighbor Advertisement
+ p = (Ether(dst=self.pg5.local_mac, src=self.pg5.remote_mac) /
+ IPv6(src=self.pg5.remote_ip6, dst=self.pg5.local_ip6) /
+ ICMPv6ND_NA(tgt=tgt) /
+ ICMPv6NDOptDstLLAddr(lladdr=self.pg5.remote_mac))
+ pkts = [p]
+ self.pg5.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Try to send ping again
+ pkts = [ping]
+ self.pg5.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Wait for ping reply
+ capture = self.pg5.get_capture(len(pkts))
+ packet = capture[0]
+ try:
+ self.assertEqual(packet[IPv6].src, self.pg5.local_ip6)
+ self.assertEqual(packet[IPv6].dst, self.pg5.remote_ip6)
+ self.assertEqual(packet.haslayer(ICMPv6EchoReply), 1)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_pool(self):
+ """ Add/delete address to NAT64 pool """
+ nat_addr = '1.2.3.4'
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=nat_addr,
+ end_addr=nat_addr,
+ vrf_id=0xFFFFFFFF, is_add=1)
+
+ addresses = self.vapi.nat64_pool_addr_dump()
+ self.assertEqual(len(addresses), 1)
+ self.assertEqual(str(addresses[0].address), nat_addr)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=nat_addr,
+ end_addr=nat_addr,
+ vrf_id=0xFFFFFFFF, is_add=0)
+
+ addresses = self.vapi.nat64_pool_addr_dump()
+ self.assertEqual(len(addresses), 0)
+
+ def test_interface(self):
+ """ Enable/disable NAT64 feature on the interface """
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ interfaces = self.vapi.nat64_interface_dump()
+ self.assertEqual(len(interfaces), 2)
+ pg0_found = False
+ pg1_found = False
+ for intf in interfaces:
+ if intf.sw_if_index == self.pg0.sw_if_index:
+ self.assertEqual(intf.flags, self.config_flags.NAT_IS_INSIDE)
+ pg0_found = True
+ elif intf.sw_if_index == self.pg1.sw_if_index:
+ self.assertEqual(intf.flags, self.config_flags.NAT_IS_OUTSIDE)
+ pg1_found = True
+ self.assertTrue(pg0_found)
+ self.assertTrue(pg1_found)
+
+ features = self.vapi.cli("show interface features pg0")
+ self.assertIn('nat64-in2out', features)
+ features = self.vapi.cli("show interface features pg1")
+ self.assertIn('nat64-out2in', features)
+
+ self.vapi.nat64_add_del_interface(is_add=0, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=0, flags=flags,
+ sw_if_index=self.pg1.sw_if_index)
+
+ interfaces = self.vapi.nat64_interface_dump()
+ self.assertEqual(len(interfaces), 0)
+
+ def test_static_bib(self):
+ """ Add/delete static BIB entry """
+ in_addr = '2001:db8:85a3::8a2e:370:7334'
+ out_addr = '10.1.1.3'
+ in_port = 1234
+ out_port = 5678
+ proto = IP_PROTOS.tcp
+
+ self.vapi.nat64_add_del_static_bib(i_addr=in_addr, o_addr=out_addr,
+ i_port=in_port, o_port=out_port,
+ proto=proto, vrf_id=0, is_add=1)
+ bib = self.vapi.nat64_bib_dump(proto=IP_PROTOS.tcp)
+ static_bib_num = 0
+ for bibe in bib:
+ if bibe.flags & self.config_flags.NAT_IS_STATIC:
+ static_bib_num += 1
+ self.assertEqual(str(bibe.i_addr), in_addr)
+ self.assertEqual(str(bibe.o_addr), out_addr)
+ self.assertEqual(bibe.i_port, in_port)
+ self.assertEqual(bibe.o_port, out_port)
+ self.assertEqual(static_bib_num, 1)
+ bibs = self.statistics.get_counter('/nat64/total-bibs')
+ self.assertEqual(bibs[0][0], 1)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=in_addr, o_addr=out_addr,
+ i_port=in_port, o_port=out_port,
+ proto=proto, vrf_id=0, is_add=0)
+ bib = self.vapi.nat64_bib_dump(proto=IP_PROTOS.tcp)
+ static_bib_num = 0
+ for bibe in bib:
+ if bibe.flags & self.config_flags.NAT_IS_STATIC:
+ static_bib_num += 1
+ self.assertEqual(static_bib_num, 0)
+ bibs = self.statistics.get_counter('/nat64/total-bibs')
+ self.assertEqual(bibs[0][0], 0)
+
+ def test_set_timeouts(self):
+ """ Set NAT64 timeouts """
+ # verify default values
+ timeouts = self.vapi.nat64_get_timeouts()
+ self.assertEqual(timeouts.udp, 300)
+ self.assertEqual(timeouts.icmp, 60)
+ self.assertEqual(timeouts.tcp_transitory, 240)
+ self.assertEqual(timeouts.tcp_established, 7440)
+
+ # set and verify custom values
+ self.vapi.nat64_set_timeouts(udp=200, tcp_established=7450,
+ tcp_transitory=250, icmp=30)
+ timeouts = self.vapi.nat64_get_timeouts()
+ self.assertEqual(timeouts.udp, 200)
+ self.assertEqual(timeouts.icmp, 30)
+ self.assertEqual(timeouts.tcp_transitory, 250)
+ self.assertEqual(timeouts.tcp_established, 7450)
+
+ def test_dynamic(self):
+ """ NAT64 dynamic translation test """
+ self.tcp_port_in = 6303
+ self.udp_port_in = 6304
+ self.icmp_id_in = 6305
+
+ ses_num_start = self.nat64_get_ses_num()
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # in2out
+ tcpn = self.statistics.get_counter('/nat64/in2out/tcp')[0]
+ udpn = self.statistics.get_counter('/nat64/in2out/udp')[0]
+ icmpn = self.statistics.get_counter('/nat64/in2out/icmp')[0]
+ drops = self.statistics.get_counter('/nat64/in2out/drops')[0]
+
+ pkts = self.create_stream_in_ip6(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ if_idx = self.pg0.sw_if_index
+ cnt = self.statistics.get_counter('/nat64/in2out/tcp')[0]
+ self.assertEqual(cnt[if_idx] - tcpn[if_idx], 1)
+ cnt = self.statistics.get_counter('/nat64/in2out/udp')[0]
+ self.assertEqual(cnt[if_idx] - udpn[if_idx], 1)
+ cnt = self.statistics.get_counter('/nat64/in2out/icmp')[0]
+ self.assertEqual(cnt[if_idx] - icmpn[if_idx], 1)
+ cnt = self.statistics.get_counter('/nat64/in2out/drops')[0]
+ self.assertEqual(cnt[if_idx] - drops[if_idx], 0)
+
+ # out2in
+ tcpn = self.statistics.get_counter('/nat64/out2in/tcp')[0]
+ udpn = self.statistics.get_counter('/nat64/out2in/udp')[0]
+ icmpn = self.statistics.get_counter('/nat64/out2in/icmp')[0]
+ drops = self.statistics.get_counter('/nat64/out2in/drops')[0]
+
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ ip = IPv6(src=''.join(['64:ff9b::', self.pg1.remote_ip4]))
+ self.verify_capture_in_ip6(capture, ip[IPv6].src, self.pg0.remote_ip6)
+
+ if_idx = self.pg1.sw_if_index
+ cnt = self.statistics.get_counter('/nat64/out2in/tcp')[0]
+ self.assertEqual(cnt[if_idx] - tcpn[if_idx], 2)
+ cnt = self.statistics.get_counter('/nat64/out2in/udp')[0]
+ self.assertEqual(cnt[if_idx] - udpn[if_idx], 1)
+ cnt = self.statistics.get_counter('/nat64/out2in/icmp')[0]
+ self.assertEqual(cnt[if_idx] - icmpn[if_idx], 1)
+ cnt = self.statistics.get_counter('/nat64/out2in/drops')[0]
+ self.assertEqual(cnt[if_idx] - drops[if_idx], 0)
+
+ bibs = self.statistics.get_counter('/nat64/total-bibs')
+ self.assertEqual(bibs[0][0], 3)
+ sessions = self.statistics.get_counter('/nat64/total-sessions')
+ self.assertEqual(sessions[0][0], 3)
+
+ # in2out
+ pkts = self.create_stream_in_ip6(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ self.verify_capture_in_ip6(capture, ip[IPv6].src, self.pg0.remote_ip6)
+
+ ses_num_end = self.nat64_get_ses_num()
+
+ self.assertEqual(ses_num_end - ses_num_start, 3)
+
+ # tenant with specific VRF
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.vrf1_nat_addr,
+ end_addr=self.vrf1_nat_addr,
+ vrf_id=self.vrf1_id, is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg2.sw_if_index)
+
+ pkts = self.create_stream_in_ip6(self.pg2, self.pg1)
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.vrf1_nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.vrf1_nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ self.verify_capture_in_ip6(capture, ip[IPv6].src, self.pg2.remote_ip6)
+
+ def test_static(self):
+ """ NAT64 static translation test """
+ self.tcp_port_in = 60303
+ self.udp_port_in = 60304
+ self.icmp_id_in = 60305
+ self.tcp_port_out = 60303
+ self.udp_port_out = 60304
+ self.icmp_id_out = 60305
+
+ ses_num_start = self.nat64_get_ses_num()
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=self.pg0.remote_ip6,
+ o_addr=self.nat_addr,
+ i_port=self.tcp_port_in,
+ o_port=self.tcp_port_out,
+ proto=IP_PROTOS.tcp, vrf_id=0,
+ is_add=1)
+ self.vapi.nat64_add_del_static_bib(i_addr=self.pg0.remote_ip6,
+ o_addr=self.nat_addr,
+ i_port=self.udp_port_in,
+ o_port=self.udp_port_out,
+ proto=IP_PROTOS.udp, vrf_id=0,
+ is_add=1)
+ self.vapi.nat64_add_del_static_bib(i_addr=self.pg0.remote_ip6,
+ o_addr=self.nat_addr,
+ i_port=self.icmp_id_in,
+ o_port=self.icmp_id_out,
+ proto=IP_PROTOS.icmp, vrf_id=0,
+ is_add=1)
+
+ # in2out
+ pkts = self.create_stream_in_ip6(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.nat_addr,
+ dst_ip=self.pg1.remote_ip4, same_port=True)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ ip = IPv6(src=''.join(['64:ff9b::', self.pg1.remote_ip4]))
+ self.verify_capture_in_ip6(capture, ip[IPv6].src, self.pg0.remote_ip6)
+
+ ses_num_end = self.nat64_get_ses_num()
+
+ self.assertEqual(ses_num_end - ses_num_start, 3)
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_session_timeout(self):
+ """ NAT64 session timeout """
+ self.icmp_id_in = 1234
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.nat64_set_timeouts(udp=300, tcp_established=5,
+ tcp_transitory=5,
+ icmp=5)
+
+ pkts = self.create_stream_in_ip6(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+
+ ses_num_before_timeout = self.nat64_get_ses_num()
+
+ sleep(15)
+
+ # ICMP and TCP session after timeout
+ ses_num_after_timeout = self.nat64_get_ses_num()
+ self.assertEqual(ses_num_before_timeout - ses_num_after_timeout, 2)
+
+ def test_icmp_error(self):
+ """ NAT64 ICMP Error message translation """
+ self.tcp_port_in = 6303
+ self.udp_port_in = 6304
+ self.icmp_id_in = 6305
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # send some packets to create sessions
+ pkts = self.create_stream_in_ip6(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture_ip4 = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture_ip4,
+ nat_ip=self.nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture_ip6 = self.pg0.get_capture(len(pkts))
+ ip = IPv6(src=''.join(['64:ff9b::', self.pg1.remote_ip4]))
+ self.verify_capture_in_ip6(capture_ip6, ip[IPv6].src,
+ self.pg0.remote_ip6)
+
+ # in2out
+ pkts = [Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=ip[IPv6].src) /
+ ICMPv6DestUnreach(code=1) /
+ packet[IPv6] for packet in capture_ip6]
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(packet[ICMP].type, 3)
+ self.assertEqual(packet[ICMP].code, 13)
+ inner = packet[IPerror]
+ self.assertEqual(inner.src, self.pg1.remote_ip4)
+ self.assertEqual(inner.dst, self.nat_addr)
+ self.assert_packet_checksums_valid(packet)
+ if inner.haslayer(TCPerror):
+ self.assertEqual(inner[TCPerror].dport, self.tcp_port_out)
+ elif inner.haslayer(UDPerror):
+ self.assertEqual(inner[UDPerror].dport, self.udp_port_out)
+ else:
+ self.assertEqual(inner[ICMPerror].id, self.icmp_id_out)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # out2in
+ pkts = [Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ ICMP(type=3, code=13) /
+ packet[IP] for packet in capture_ip4]
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, ip.src)
+ self.assertEqual(packet[IPv6].dst, self.pg0.remote_ip6)
+ icmp = packet[ICMPv6DestUnreach]
+ self.assertEqual(icmp.code, 1)
+ inner = icmp[IPerror6]
+ self.assertEqual(inner.src, self.pg0.remote_ip6)
+ self.assertEqual(inner.dst, ip.src)
+ self.assert_icmpv6_checksum_valid(packet)
+ if inner.haslayer(TCPerror):
+ self.assertEqual(inner[TCPerror].sport, self.tcp_port_in)
+ elif inner.haslayer(UDPerror):
+ self.assertEqual(inner[UDPerror].sport, self.udp_port_in)
+ else:
+ self.assertEqual(inner[ICMPv6EchoRequest].id,
+ self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_hairpinning(self):
+ """ NAT64 hairpinning """
+
+ client = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ server_tcp_in_port = 22
+ server_tcp_out_port = 4022
+ server_udp_in_port = 23
+ server_udp_out_port = 4023
+ client_tcp_in_port = 1234
+ client_udp_in_port = 1235
+ client_tcp_out_port = 0
+ client_udp_out_port = 0
+ ip = IPv6(src=''.join(['64:ff9b::', self.nat_addr]))
+ nat_addr_ip6 = ip.src
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=server.ip6n,
+ o_addr=self.nat_addr,
+ i_port=server_tcp_in_port,
+ o_port=server_tcp_out_port,
+ proto=IP_PROTOS.tcp, vrf_id=0,
+ is_add=1)
+ self.vapi.nat64_add_del_static_bib(i_addr=server.ip6n,
+ o_addr=self.nat_addr,
+ i_port=server_udp_in_port,
+ o_port=server_udp_out_port,
+ proto=IP_PROTOS.udp, vrf_id=0,
+ is_add=1)
+
+ # client to server
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=client.ip6, dst=nat_addr_ip6) /
+ TCP(sport=client_tcp_in_port, dport=server_tcp_out_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=client.ip6, dst=nat_addr_ip6) /
+ UDP(sport=client_udp_in_port, dport=server_udp_out_port))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, nat_addr_ip6)
+ self.assertEqual(packet[IPv6].dst, server.ip6)
+ self.assert_packet_checksums_valid(packet)
+ if packet.haslayer(TCP):
+ self.assertNotEqual(packet[TCP].sport, client_tcp_in_port)
+ self.assertEqual(packet[TCP].dport, server_tcp_in_port)
+ client_tcp_out_port = packet[TCP].sport
+ else:
+ self.assertNotEqual(packet[UDP].sport, client_udp_in_port)
+ self.assertEqual(packet[UDP].dport, server_udp_in_port)
+ client_udp_out_port = packet[UDP].sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server to client
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=server.ip6, dst=nat_addr_ip6) /
+ TCP(sport=server_tcp_in_port, dport=client_tcp_out_port))
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=server.ip6, dst=nat_addr_ip6) /
+ UDP(sport=server_udp_in_port, dport=client_udp_out_port))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, nat_addr_ip6)
+ self.assertEqual(packet[IPv6].dst, client.ip6)
+ self.assert_packet_checksums_valid(packet)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].sport, server_tcp_out_port)
+ self.assertEqual(packet[TCP].dport, client_tcp_in_port)
+ else:
+ self.assertEqual(packet[UDP].sport, server_udp_out_port)
+ self.assertEqual(packet[UDP].dport, client_udp_in_port)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # ICMP error
+ pkts = []
+ pkts = [Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=client.ip6, dst=nat_addr_ip6) /
+ ICMPv6DestUnreach(code=1) /
+ packet[IPv6] for packet in capture]
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, nat_addr_ip6)
+ self.assertEqual(packet[IPv6].dst, server.ip6)
+ icmp = packet[ICMPv6DestUnreach]
+ self.assertEqual(icmp.code, 1)
+ inner = icmp[IPerror6]
+ self.assertEqual(inner.src, server.ip6)
+ self.assertEqual(inner.dst, nat_addr_ip6)
+ self.assert_packet_checksums_valid(packet)
+ if inner.haslayer(TCPerror):
+ self.assertEqual(inner[TCPerror].sport, server_tcp_in_port)
+ self.assertEqual(inner[TCPerror].dport,
+ client_tcp_out_port)
+ else:
+ self.assertEqual(inner[UDPerror].sport, server_udp_in_port)
+ self.assertEqual(inner[UDPerror].dport,
+ client_udp_out_port)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_prefix(self):
+ """ NAT64 Network-Specific Prefix """
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.vrf1_nat_addr,
+ end_addr=self.vrf1_nat_addr,
+ vrf_id=self.vrf1_id, is_add=1)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg2.sw_if_index)
+
+ # Add global prefix
+ global_pref64 = "2001:db8::"
+ global_pref64_len = 32
+ global_pref64_str = "{}/{}".format(global_pref64, global_pref64_len)
+ self.vapi.nat64_add_del_prefix(prefix=global_pref64_str, vrf_id=0,
+ is_add=1)
+
+ prefix = self.vapi.nat64_prefix_dump()
+ self.assertEqual(len(prefix), 1)
+ self.assertEqual(str(prefix[0].prefix), global_pref64_str)
+ self.assertEqual(prefix[0].vrf_id, 0)
+
+ # Add tenant specific prefix
+ vrf1_pref64 = "2001:db8:122:300::"
+ vrf1_pref64_len = 56
+ vrf1_pref64_str = "{}/{}".format(vrf1_pref64, vrf1_pref64_len)
+ self.vapi.nat64_add_del_prefix(prefix=vrf1_pref64_str,
+ vrf_id=self.vrf1_id, is_add=1)
+
+ prefix = self.vapi.nat64_prefix_dump()
+ self.assertEqual(len(prefix), 2)
+
+ # Global prefix
+ pkts = self.create_stream_in_ip6(self.pg0,
+ self.pg1,
+ pref=global_pref64,
+ plen=global_pref64_len)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ dst_ip = self.compose_ip6(self.pg1.remote_ip4,
+ global_pref64,
+ global_pref64_len)
+ self.verify_capture_in_ip6(capture, dst_ip, self.pg0.remote_ip6)
+
+ # Tenant specific prefix
+ pkts = self.create_stream_in_ip6(self.pg2,
+ self.pg1,
+ pref=vrf1_pref64,
+ plen=vrf1_pref64_len)
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+ self.verify_capture_out(capture, nat_ip=self.vrf1_nat_addr,
+ dst_ip=self.pg1.remote_ip4)
+
+ pkts = self.create_stream_out(self.pg1, dst_ip=self.vrf1_nat_addr)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture(len(pkts))
+ dst_ip = self.compose_ip6(self.pg1.remote_ip4,
+ vrf1_pref64,
+ vrf1_pref64_len)
+ self.verify_capture_in_ip6(capture, dst_ip, self.pg2.remote_ip6)
+
+ def test_unknown_proto(self):
+ """ NAT64 translate packet with unknown protocol """
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+ remote_ip6 = self.compose_ip6(self.pg1.remote_ip4, '64:ff9b::', 96)
+
+ # in2out
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_ip6) /
+ TCP(sport=self.tcp_port_in, dport=20))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_ip6, nh=47) /
+ GRE() /
+ IP(src=self.pg2.local_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IP].src, self.nat_addr)
+ self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)
+ self.assertEqual(packet.haslayer(GRE), 1)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # out2in
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.nat_addr) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.local_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg1.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IPv6].src, remote_ip6)
+ self.assertEqual(packet[IPv6].dst, self.pg0.remote_ip6)
+ self.assertEqual(packet[IPv6].nh, 47)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_hairpinning_unknown_proto(self):
+ """ NAT64 translate packet with unknown protocol - hairpinning """
+
+ client = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ server_tcp_in_port = 22
+ server_tcp_out_port = 4022
+ client_tcp_in_port = 1234
+ client_tcp_out_port = 1235
+ server_nat_ip = "10.0.0.100"
+ client_nat_ip = "10.0.0.110"
+ server_nat_ip6 = self.compose_ip6(server_nat_ip, '64:ff9b::', 96)
+ client_nat_ip6 = self.compose_ip6(client_nat_ip, '64:ff9b::', 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=server_nat_ip,
+ end_addr=client_nat_ip,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=server.ip6n,
+ o_addr=server_nat_ip,
+ i_port=server_tcp_in_port,
+ o_port=server_tcp_out_port,
+ proto=IP_PROTOS.tcp, vrf_id=0,
+ is_add=1)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=server.ip6n,
+ o_addr=server_nat_ip, i_port=0,
+ o_port=0,
+ proto=IP_PROTOS.gre, vrf_id=0,
+ is_add=1)
+
+ self.vapi.nat64_add_del_static_bib(i_addr=client.ip6n,
+ o_addr=client_nat_ip,
+ i_port=client_tcp_in_port,
+ o_port=client_tcp_out_port,
+ proto=IP_PROTOS.tcp, vrf_id=0,
+ is_add=1)
+
+ # client to server
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=client.ip6, dst=server_nat_ip6) /
+ TCP(sport=client_tcp_in_port, dport=server_tcp_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=client.ip6, dst=server_nat_ip6, nh=IP_PROTOS.gre) /
+ GRE() /
+ IP(src=self.pg2.local_ip4, dst=self.pg2.remote_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IPv6].src, client_nat_ip6)
+ self.assertEqual(packet[IPv6].dst, server.ip6)
+ self.assertEqual(packet[IPv6].nh, IP_PROTOS.gre)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # server to client
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=server.ip6, dst=client_nat_ip6, nh=IP_PROTOS.gre) /
+ GRE() /
+ IP(src=self.pg2.remote_ip4, dst=self.pg2.local_ip4) /
+ TCP(sport=1234, dport=1234))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg0.get_capture(1)
+ packet = p[0]
+ try:
+ self.assertEqual(packet[IPv6].src, server_nat_ip6)
+ self.assertEqual(packet[IPv6].dst, client.ip6)
+ self.assertEqual(packet[IPv6].nh, IP_PROTOS.gre)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ def test_one_armed_nat64(self):
+ """ One armed NAT64 """
+ external_port = 0
+ remote_host_ip6 = self.compose_ip6(self.pg3.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
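+        # one-armed setup: pg3 acts as both the NAT64 inside and outside
+        # interface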
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg3.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg3.sw_if_index)
+
+ # in2out
+ p = (Ether(src=self.pg3.remote_mac, dst=self.pg3.local_mac) /
+ IPv6(src=self.pg3.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=80))
+ self.pg3.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.nat_addr)
+ self.assertEqual(ip.dst, self.pg3.remote_ip4)
+ self.assertNotEqual(tcp.sport, 12345)
+ external_port = tcp.sport
+ self.assertEqual(tcp.dport, 80)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # out2in
+ p = (Ether(src=self.pg3.remote_mac, dst=self.pg3.local_mac) /
+ IP(src=self.pg3.remote_ip4, dst=self.nat_addr) /
+ TCP(sport=80, dport=external_port))
+ self.pg3.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture(1)
+ p = capture[0]
+ try:
+ ip = p[IPv6]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, remote_host_ip6)
+ self.assertEqual(ip.dst, self.pg3.remote_ip6)
+ self.assertEqual(tcp.sport, 80)
+ self.assertEqual(tcp.dport, 12345)
+ self.assert_packet_checksums_valid(p)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_frag_in_order(self):
+ """ NAT64 translate fragments arriving in order """
+ self.tcp_port_in = random.randint(1025, 65535)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # in2out
+ data = b'a' * 200
+ pkts = self.create_stream_frag_ip6(self.pg0, self.pg1.remote_ip4,
+ self.tcp_port_in, 20, data)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ self.assertEqual(p[TCP].dport, 20)
+ self.assertNotEqual(p[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = p[TCP].sport
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ data = b"A" * 4 + b"b" * 16 + b"C" * 3
+ pkts = self.create_stream_frag(self.pg1,
+ self.nat_addr,
+ 20,
+ self.tcp_port_out,
+ data)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ self.logger.debug(ppc("Captured:", frags))
+ src = self.compose_ip6(self.pg1.remote_ip4, '64:ff9b::', 96)
+ p = self.reass_frags_and_verify_ip6(frags, src, self.pg0.remote_ip6)
+ self.assertEqual(p[TCP].sport, 20)
+ self.assertEqual(p[TCP].dport, self.tcp_port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def test_reass_hairpinning(self):
+ """ NAT64 fragments hairpinning """
+ data = b'a' * 200
+ server = self.pg0.remote_hosts[1]
+ server_in_port = random.randint(1025, 65535)
+ server_out_port = random.randint(1025, 65535)
+ client_in_port = random.randint(1025, 65535)
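+        # the NAT64 pool address expressed under the 64:ff9b::/96 prefix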
+ ip = IPv6(src=''.join(['64:ff9b::', self.nat_addr]))
+ nat_addr_ip6 = ip.src
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # add static BIB entry for server
+ self.vapi.nat64_add_del_static_bib(i_addr=server.ip6n,
+ o_addr=self.nat_addr,
+ i_port=server_in_port,
+ o_port=server_out_port,
+ proto=IP_PROTOS.tcp, vrf_id=0,
+ is_add=1)
+
+ # send packet from host to server
+ pkts = self.create_stream_frag_ip6(self.pg0,
+ self.nat_addr,
+ client_in_port,
+ server_out_port,
+ data)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ self.logger.debug(ppc("Captured:", frags))
+ p = self.reass_frags_and_verify_ip6(frags, nat_addr_ip6, server.ip6)
+ self.assertNotEqual(p[TCP].sport, client_in_port)
+ self.assertEqual(p[TCP].dport, server_in_port)
+ self.assertEqual(data, p[Raw].load)
+
+ def test_frag_out_of_order(self):
+ """ NAT64 translate fragments arriving out of order """
+ self.tcp_port_in = random.randint(1025, 65535)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
+ # in2out
+ data = b'a' * 200
+ pkts = self.create_stream_frag_ip6(self.pg0, self.pg1.remote_ip4,
+ self.tcp_port_in, 20, data)
+ pkts.reverse()
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(len(pkts))
+ p = self.reass_frags_and_verify(frags,
+ self.nat_addr,
+ self.pg1.remote_ip4)
+ self.assertEqual(p[TCP].dport, 20)
+ self.assertNotEqual(p[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = p[TCP].sport
+ self.assertEqual(data, p[Raw].load)
+
+ # out2in
+ data = b"A" * 4 + b"B" * 16 + b"C" * 3
+ pkts = self.create_stream_frag(self.pg1,
+ self.nat_addr,
+ 20,
+ self.tcp_port_out,
+ data)
+ pkts.reverse()
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg0.get_capture(len(pkts))
+ src = self.compose_ip6(self.pg1.remote_ip4, '64:ff9b::', 96)
+ p = self.reass_frags_and_verify_ip6(frags, src, self.pg0.remote_ip6)
+ self.assertEqual(p[TCP].sport, 20)
+ self.assertEqual(p[TCP].dport, self.tcp_port_in)
+ self.assertEqual(data, p[Raw].load)
+
+ def test_interface_addr(self):
+ """ Acquire NAT64 pool addresses from interface """
+ self.vapi.nat64_add_del_interface_addr(
+ is_add=1,
+ sw_if_index=self.pg4.sw_if_index)
+
+ # no address in NAT64 pool
+        addresses = self.vapi.nat64_pool_addr_dump()
+ self.assertEqual(0, len(addresses))
+
+ # configure interface address and check NAT64 address pool
+ self.pg4.config_ip4()
+ addresses = self.vapi.nat64_pool_addr_dump()
+ self.assertEqual(len(addresses), 1)
+
+ self.assertEqual(str(addresses[0].address),
+ self.pg4.local_ip4)
+
+ # remove interface address and check NAT64 address pool
+ self.pg4.unconfig_ip4()
+ addresses = self.vapi.nat64_pool_addr_dump()
+ self.assertEqual(0, len(addresses))
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_ipfix_max_bibs_sessions(self):
+ """ IPFIX logging maximum session and BIB entries exceeded """
+ max_bibs = 1280
+ max_sessions = 2560
+ remote_host_ip6 = self.compose_ip6(self.pg1.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+
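+        # create max_bibs source addresses, each opening two TCP sessions
+        # (dport 80 and 22), i.e. max_sessions sessions in total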
+ pkts = []
+ src = ""
+ for i in range(0, max_bibs):
+ src = "fd01:aa::%x" % (i)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=80))
+ pkts.append(p)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=22))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(max_sessions)
+
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
+ src_address=self.pg3.local_ip4,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=1)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=25))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sleep(1)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(7)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_sessions(data, max_sessions)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=80))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.assert_nothing_captured()
+ sleep(1)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(1)
+ # verify events in data set
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_bibs(data, max_bibs)
+
+ def test_ipfix_bib_ses(self):
+ """ IPFIX logging NAT64 BIB/session create and delete events """
+ self.tcp_port_in = random.randint(1025, 65535)
+ remote_host_ip6 = self.compose_ip6(self.pg1.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4,
+ src_address=self.pg3.local_ip4,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=1)
+
+ # Create
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=self.tcp_port_in, dport=25))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ self.tcp_port_out = p[0][TCP].sport
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(8)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
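+        # field 230 is the natEvent IE: 10 = NAT64 BIB create,
+        # 6 = NAT64 session create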
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ if scapy.compat.orb(data[0][230]) == 10:
+ self.verify_ipfix_bib(data, 1, self.pg0.remote_ip6)
+ elif scapy.compat.orb(data[0][230]) == 6:
+ self.verify_ipfix_nat64_ses(data,
+ 1,
+ self.pg0.remote_ip6,
+ self.pg1.remote_ip4,
+ 25)
+ else:
+ self.logger.error(ppp("Unexpected or invalid packet: ", p))
+
+ # Delete
+ self.pg_enable_capture(self.pg_interfaces)
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=0)
+ self.vapi.ipfix_flush()
+ capture = self.pg3.get_capture(2)
+ # verify events in data set
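+        # field 230 is the natEvent IE: 11 = NAT64 BIB delete,
+        # 7 = NAT64 session delete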
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ if scapy.compat.orb(data[0][230]) == 11:
+ self.verify_ipfix_bib(data, 0, self.pg0.remote_ip6)
+ elif scapy.compat.orb(data[0][230]) == 7:
+ self.verify_ipfix_nat64_ses(data,
+ 0,
+ self.pg0.remote_ip6,
+ self.pg1.remote_ip4,
+ 25)
+ else:
+ self.logger.error(ppp("Unexpected or invalid packet: ", p))
+
+ def test_syslog_sess(self):
+ """ Test syslog session creation and deletion """
+ self.tcp_port_in = random.randint(1025, 65535)
+ remote_host_ip6 = self.compose_ip6(self.pg1.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=1)
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat64_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(is_add=1, flags=0,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.syslog_set_filter(
+ self.SYSLOG_SEVERITY.SYSLOG_API_SEVERITY_INFO)
+ self.vapi.syslog_set_sender(self.pg3.local_ip4, self.pg3.remote_ip4)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=self.tcp_port_in, dport=self.tcp_external_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ self.tcp_port_out = p[0][TCP].sport
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_sess(capture[0][Raw].load, is_ip6=True)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=self.nat_addr,
+ end_addr=self.nat_addr,
+ vrf_id=0xFFFFFFFF,
+ is_add=0)
+ capture = self.pg3.get_capture(1)
+ self.verify_syslog_sess(capture[0][Raw].load, False, True)
+
+ def nat64_get_ses_num(self):
+ """
+ Return number of active NAT64 sessions.
+ """
+ st = self.vapi.nat64_st_dump(proto=255)
+ return len(st)
+
+ def clear_nat64(self):
+ """
+ Clear NAT64 configuration.
+ """
+ self.vapi.nat_ipfix_enable_disable(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port,
+ enable=0)
+ self.ipfix_src_port = 4739
+ self.ipfix_domain_id = 1
+
+ self.vapi.syslog_set_filter(
+ self.SYSLOG_SEVERITY.SYSLOG_API_SEVERITY_EMERG)
+
+ self.vapi.nat64_set_timeouts(udp=300, tcp_established=7440,
+ tcp_transitory=240, icmp=60)
+
+ interfaces = self.vapi.nat64_interface_dump()
+ for intf in interfaces:
+ self.vapi.nat64_add_del_interface(is_add=0, flags=intf.flags,
+ sw_if_index=intf.sw_if_index)
+
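+        # only static BIB entries are deleted explicitly; dynamic ones are
+        # freed when the pool addresses are removed below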
+ bib = self.vapi.nat64_bib_dump(proto=255)
+ for bibe in bib:
+ if bibe.flags & self.config_flags.NAT_IS_STATIC:
+ self.vapi.nat64_add_del_static_bib(i_addr=bibe.i_addr,
+ o_addr=bibe.o_addr,
+ i_port=bibe.i_port,
+ o_port=bibe.o_port,
+ proto=bibe.proto,
+ vrf_id=bibe.vrf_id,
+ is_add=0)
+
+        addresses = self.vapi.nat64_pool_addr_dump()
+        for addr in addresses:
+ self.vapi.nat64_add_del_pool_addr_range(start_addr=addr.address,
+ end_addr=addr.address,
+ vrf_id=addr.vrf_id,
+ is_add=0)
+
+ prefixes = self.vapi.nat64_prefix_dump()
+ for prefix in prefixes:
+ self.vapi.nat64_add_del_prefix(prefix=str(prefix.prefix),
+ vrf_id=prefix.vrf_id, is_add=0)
+
+ bibs = self.statistics.get_counter('/nat64/total-bibs')
+ self.assertEqual(bibs[0][0], 0)
+ sessions = self.statistics.get_counter('/nat64/total-sessions')
+ self.assertEqual(sessions[0][0], 0)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_nat66.py b/test/test_nat66.py
new file mode 100644
index 00000000000..acda72bcdf6
--- /dev/null
+++ b/test/test_nat66.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+
+import ipaddress
+import random
+import socket
+import struct
+import unittest
+from io import BytesIO
+from time import sleep
+
+import scapy.compat
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from ipfix import IPFIX, Set, Template, Data, IPFIXDecoder
+from scapy.all import bind_layers, Packet, ByteEnumField, ShortField, \
+ IPField, IntField, LongField, XByteField, FlagsField, FieldLenField, \
+ PacketListField
+from scapy.data import IP_PROTOS
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
+from scapy.layers.inet6 import ICMPv6DestUnreach, IPerror6, IPv6ExtHdrFragment
+from scapy.layers.inet6 import IPv6, ICMPv6EchoRequest, ICMPv6EchoReply, \
+ ICMPv6ND_NS, ICMPv6ND_NA, ICMPv6NDOptDstLLAddr, fragment6
+from scapy.layers.l2 import Ether, ARP, GRE
+from scapy.packet import Raw
+from syslog_rfc5424_parser import SyslogMessage, ParseError
+from syslog_rfc5424_parser.constants import SyslogSeverity
+from util import ip4_range
+from util import ppc, ppp
+from vpp_acl import AclRule, VppAcl, VppAclInterface
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_neighbor import VppNeighbor
+from vpp_papi import VppEnum
+
+
+class TestNAT66(VppTestCase):
+ """ NAT66 Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestNAT66, cls).setUpClass()
+
+ cls.nat_addr = 'fd01:ff::2'
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.configure_ipv6_neighbors()
+
+ @property
+ def config_flags(self):
+ return VppEnum.vl_api_nat_config_flags_t
+
+ def plugin_enable(self):
+ self.vapi.nat66_plugin_enable_disable(enable=1)
+
+ def plugin_disable(self):
+ self.vapi.nat66_plugin_enable_disable(enable=0)
+
+ def setUp(self):
+ super(TestNAT66, self).setUp()
+ self.plugin_enable()
+
+ def tearDown(self):
+ super(TestNAT66, self).tearDown()
+ if not self.vpp_dead:
+ self.plugin_disable()
+
+ def test_static(self):
+ """ 1:1 NAT66 test """
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat66_add_del_interface(is_add=1,
+ sw_if_index=self.pg1.sw_if_index)
+ self.vapi.nat66_add_del_static_mapping(
+ local_ip_address=self.pg0.remote_ip6,
+ external_ip_address=self.nat_addr,
+ is_add=1)
+
+ # in2out
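+        # TCP, UDP, ICMPv6 echo and GRE (an unknown upper-layer protocol)
+        # are all translated by the 1:1 static mapping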
+ pkts = []
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
+ TCP())
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
+ UDP())
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
+ ICMPv6EchoRequest())
+ pkts.append(p)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
+ GRE() / IP() / TCP())
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(len(pkts))
+
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, self.nat_addr)
+ self.assertEqual(packet[IPv6].dst, self.pg1.remote_ip6)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # out2in
+ pkts = []
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
+ TCP())
+ pkts.append(p)
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
+ UDP())
+ pkts.append(p)
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
+ ICMPv6EchoReply())
+ pkts.append(p)
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.nat_addr) /
+ GRE() / IP() / TCP())
+ pkts.append(p)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture(len(pkts))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IPv6].src, self.pg1.remote_ip6)
+ self.assertEqual(packet[IPv6].dst, self.pg0.remote_ip6)
+ self.assert_packet_checksums_valid(packet)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ sm = self.vapi.nat66_static_mapping_dump()
+ self.assertEqual(len(sm), 1)
+ self.assertEqual(sm[0].total_pkts, 8)
+
+ def test_check_no_translate(self):
+        """ NAT66 translates only when the egress interface is an outside interface """
+ flags = self.config_flags.NAT_IS_INSIDE
+ self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.nat66_add_del_interface(is_add=1, flags=flags,
+ sw_if_index=self.pg1.sw_if_index)
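+        # both interfaces are inside, so traffic between them must not be
+        # translated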
+ self.vapi.nat66_add_del_static_mapping(
+ local_ip_address=self.pg0.remote_ip6,
+ external_ip_address=self.nat_addr,
+ is_add=1)
+
+ # in2out
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6) /
+ UDP())
+ self.pg0.add_stream([p])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture(1)
+ packet = capture[0]
+ try:
+ self.assertEqual(packet[IPv6].src, self.pg0.remote_ip6)
+ self.assertEqual(packet[IPv6].dst, self.pg1.remote_ip6)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ping.py b/test/test_ping.py
new file mode 100644
index 00000000000..8c5c087b0c5
--- /dev/null
+++ b/test/test_ping.py
@@ -0,0 +1,176 @@
+import socket
+
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import Ether, GRE
+from scapy.packet import Raw
+
+from framework import VppTestCase
+from util import ppp
+from vpp_ip_route import VppIpInterfaceAddress, VppIpRoute, VppRoutePath
+from vpp_neighbor import VppNeighbor
+
+""" TestPing is a subclass of VPPTestCase classes.
+
+Basic test for sanity check of the ping.
+
+"""
+
+
+class TestPing(VppTestCase):
+ """ Ping Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestPing, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+ except Exception:
+ super(TestPing, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestPing, cls).tearDownClass()
+
+ def tearDown(self):
+ super(TestPing, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show hardware"))
+
+ def verify_ping_request(self, p, src, dst, seq):
+ ip = p[IP]
+ self.assertEqual(ip.version, 4)
+ self.assertEqual(ip.flags, 0)
+ self.assertEqual(ip.src, src)
+ self.assertEqual(ip.dst, dst)
+ self.assertEqual(ip.proto, 1)
+ self.assertEqual(len(ip.options), 0)
+ self.assertGreaterEqual(ip.ttl, 254)
+ icmp = p[ICMP]
+ self.assertEqual(icmp.type, 8)
+ self.assertEqual(icmp.code, 0)
+ self.assertEqual(icmp.seq, seq)
+ return icmp
+
+ def test_ping_basic(self):
+ """ basic ping test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.logger.info(self.vapi.cli("show ip4 neighbors"))
+ self.logger.info(self.vapi.cli("show ip6 neighbors"))
+
+ remote_ip4 = self.pg1.remote_ip4
+ ping_cmd = "ping " + remote_ip4 + " interval 0.01 repeat 10"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+ out = self.pg1.get_capture(10)
+ icmp_id = None
+ icmp_seq = 1
+ for p in out:
+ icmp = self.verify_ping_request(p, self.pg1.local_ip4,
+ self.pg1.remote_ip4, icmp_seq)
+ icmp_seq = icmp_seq + 1
+ if icmp_id is None:
+ icmp_id = icmp.id
+ else:
+ self.assertEqual(icmp.id, icmp_id)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_ping_burst(self):
+ """ burst ping test """
+ try:
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.logger.info(self.vapi.cli("show ip neighbors"))
+
+ remote_ip4 = self.pg1.remote_ip4
+ ping_cmd = "ping " + remote_ip4 + " interval 0.01 burst 3"
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
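+            # burst 3 sends three requests per sequence number; with the
+            # default repeat count the test expects 3 * 5 = 15 requests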
+ out = self.pg1.get_capture(3*5)
+ icmp_id = None
+ icmp_seq = 1
+ count = 0
+ for p in out:
+ icmp = self.verify_ping_request(p, self.pg1.local_ip4,
+ self.pg1.remote_ip4, icmp_seq)
+ count = count + 1
+ if count >= 3:
+ icmp_seq = icmp_seq + 1
+ count = 0
+ if icmp_id is None:
+ icmp_id = icmp.id
+ else:
+ self.assertEqual(icmp.id, icmp_id)
+ finally:
+ self.vapi.cli("show error")
+
+ def test_ping_src(self):
+ """ ping with source address set """
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.logger.info(self.vapi.cli("show ip4 neighbors"))
+ self.logger.info(self.vapi.cli("show ip6 neighbors"))
+
+ nbr_addr = "10.0.0.2"
+ VppIpInterfaceAddress(self, self.pg1, "10.0.0.1", 24).add_vpp_config()
+ VppNeighbor(self, self.pg1.sw_if_index,
+ "00:11:22:33:44:55",
+ nbr_addr).add_vpp_config()
+
+ ping_cmd = "ping %s interval 0.01 repeat 3" % self.pg1.remote_ip4
+ ret = self.vapi.cli(ping_cmd)
+ out = self.pg1.get_capture(3)
+ icmp_seq = 1
+ for p in out:
+ icmp = self.verify_ping_request(p, self.pg1.local_ip4,
+ self.pg1.remote_ip4, icmp_seq)
+ icmp_seq = icmp_seq + 1
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ ping_cmd = "ping %s interval 0.01 repeat 3" % nbr_addr
+ ret = self.vapi.cli(ping_cmd)
+ out = self.pg1.get_capture(3)
+ icmp_seq = 1
+ for p in out:
+ icmp = self.verify_ping_request(p, "10.0.0.1", nbr_addr, icmp_seq)
+ icmp_seq = icmp_seq + 1
+
+ def test_ping_fib_routed_dst(self):
+ """ ping destination routed according to FIB table """
+
+ try:
+ self.pg1.generate_remote_hosts(1)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ routed_dst = "10.0.2.0"
+ self.logger.info(self.vapi.cli("show ip4 neighbors"))
+ VppIpRoute(self, routed_dst, 24,
+ [VppRoutePath(self.pg1.remote_hosts[0].ip4,
+ self.pg1.sw_if_index)]).add_vpp_config()
+ ping_cmd = "ping %s interval 0.01 repeat 3" % routed_dst
+ ret = self.vapi.cli(ping_cmd)
+ self.logger.info(ret)
+ out = self.pg1.get_capture(3)
+ icmp_seq = 1
+ for p in out:
+ self.verify_ping_request(p, self.pg1.local_ip4, routed_dst,
+ icmp_seq)
+ icmp_seq = icmp_seq + 1
+ finally:
+ self.vapi.cli("show error")
diff --git a/test/test_pnat.py b/test/test_pnat.py
new file mode 100644
index 00000000000..d5b60050691
--- /dev/null
+++ b/test/test_pnat.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python3
+"""Policy 1:1 NAT functional tests"""
+
+import unittest
+from scapy.layers.inet import Ether, IP, UDP, ICMP
+from framework import VppTestCase, VppTestRunner
+from vpp_papi import VppEnum
+
+
+class TestPNAT(VppTestCase):
+ """ PNAT Test Case """
+ maxDiff = None
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestPNAT, cls).setUpClass()
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestPNAT, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestPNAT, self).setUp()
+ for i in self.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ def tearDown(self):
+ super(TestPNAT, self).tearDown()
+ if not self.vpp_dead:
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.admin_down()
+
+ def validate(self, rx, expected):
+ self.assertEqual(rx, expected.__class__(expected))
+
+ def validate_bytes(self, rx, expected):
+ self.assertEqual(rx, expected)
+
+ def ping_check(self):
+        """ Verify that traffic not matching any PNAT binding still works. """
+ p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
+
+ icmpecho = (IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ ICMP())
+ reply = (IP(src=self.pg0.local_ip4, dst=self.pg0.remote_ip4) /
+ ICMP(type='echo-reply'))
+ rx = self.send_and_expect(self.pg0, p_ether/icmpecho * 1, self.pg0)
+ for p in rx:
+ reply[IP].id = p[IP].id
+ self.validate(p[1], reply)
+
+ def test_pnat(self):
+ """ PNAT test """
+
+ PNAT_IP4_INPUT = VppEnum.vl_api_pnat_attachment_point_t.PNAT_IP4_INPUT
+ PNAT_IP4_OUTPUT = \
+ VppEnum.vl_api_pnat_attachment_point_t.PNAT_IP4_OUTPUT
+
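+        # 'mask' bit meanings (as used below): 0x1 = src addr,
+        # 0x2 = dst addr, 0x4 = sport, 0x8 = dport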
+ tests = [
+ {
+ 'input': PNAT_IP4_INPUT,
+ 'sw_if_index': self.pg0.sw_if_index,
+ 'match': {'mask': 0xa, 'dst': '10.10.10.10', 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0x2, 'dst': self.pg1.remote_ip4},
+ 'send': (IP(src=self.pg0.remote_ip4, dst='10.10.10.10') /
+ UDP(dport=6871)),
+ 'reply': (IP(src=self.pg0.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(dport=6871))
+ },
+ {
+ 'input': PNAT_IP4_OUTPUT,
+ 'sw_if_index': self.pg1.sw_if_index,
+ 'match': {'mask': 0x9, 'src': self.pg0.remote_ip4, 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0x1, 'src': '11.11.11.11'},
+ 'send': (IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(dport=6871)),
+ 'reply': (IP(src='11.11.11.11', dst=self.pg1.remote_ip4) /
+ UDP(dport=6871))
+ },
+ {
+ 'input': PNAT_IP4_INPUT,
+ 'sw_if_index': self.pg0.sw_if_index,
+ 'match': {'mask': 0xa, 'dst': '10.10.10.10', 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0xa, 'dst': self.pg1.remote_ip4,
+ 'dport': 5555},
+ 'send': (IP(src=self.pg0.remote_ip4, dst='10.10.10.10') /
+ UDP(sport=65530, dport=6871)),
+ 'reply': (IP(src=self.pg0.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=65530, dport=5555))
+ },
+ {
+ 'input': PNAT_IP4_INPUT,
+ 'sw_if_index': self.pg0.sw_if_index,
+ 'match': {'mask': 0xa, 'dst': self.pg1.remote_ip4, 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0x8, 'dport': 5555},
+ 'send': (IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(dport=6871, chksum=0)),
+ 'reply': (IP(src=self.pg0.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(dport=5555, chksum=0))
+ },
+ {
+ 'input': PNAT_IP4_INPUT,
+ 'sw_if_index': self.pg0.sw_if_index,
+ 'match': {'mask': 0x2, 'dst': self.pg1.remote_ip4, 'proto': 1},
+ 'rewrite': {'mask': 0x1, 'src': '8.8.8.8'},
+ 'send': (IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ ICMP()),
+ 'reply': IP(src='8.8.8.8', dst=self.pg1.remote_ip4)/ICMP(),
+ },
+ ]
+
+ p_ether = Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
+ for t in tests:
+ rv = self.vapi.pnat_binding_add(match=t['match'],
+ rewrite=t['rewrite'])
+ self.vapi.pnat_binding_attach(sw_if_index=t['sw_if_index'],
+ attachment=t['input'],
+ binding_index=rv.binding_index)
+
+ reply = t['reply']
+ reply[IP].ttl -= 1
+ rx = self.send_and_expect(self.pg0, p_ether/t['send']*1, self.pg1)
+ for p in rx:
+ # p.show2()
+ self.validate(p[1], reply)
+
+ self.ping_check()
+
+ self.vapi.pnat_binding_detach(sw_if_index=t['sw_if_index'],
+ attachment=t['input'],
+ binding_index=rv.binding_index)
+ self.vapi.pnat_binding_del(binding_index=rv.binding_index)
+
+ def test_pnat_show(self):
+ """ PNAT show tests """
+
+ PNAT_IP4_INPUT = VppEnum.vl_api_pnat_attachment_point_t.PNAT_IP4_INPUT
+ PNAT_IP4_OUTPUT = \
+ VppEnum.vl_api_pnat_attachment_point_t.PNAT_IP4_OUTPUT
+
+ tests = [
+ {
+ 'input': PNAT_IP4_INPUT,
+ 'sw_if_index': self.pg0.sw_if_index,
+ 'match': {'mask': 0xa, 'dst': '10.10.10.10', 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0x2, 'dst': self.pg1.remote_ip4},
+ 'send': (IP(src=self.pg0.remote_ip4, dst='10.10.10.10') /
+ UDP(dport=6871)),
+ 'reply': (IP(src=self.pg0.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(dport=6871))
+ },
+ {
+ 'input': PNAT_IP4_OUTPUT,
+ 'sw_if_index': self.pg1.sw_if_index,
+ 'match': {'mask': 0x9, 'src': self.pg0.remote_ip4, 'proto': 17,
+ 'dport': 6871},
+ 'rewrite': {'mask': 0x1, 'src': '11.11.11.11'},
+ 'send': (IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(dport=6871)),
+ 'reply': (IP(src='11.11.11.11', dst=self.pg1.remote_ip4) /
+ UDP(dport=6871))
+ },
+ ]
+ binding_index = []
+ for t in tests:
+ rv = self.vapi.pnat_binding_add(match=t['match'],
+ rewrite=t['rewrite'])
+ binding_index.append(rv.binding_index)
+ self.vapi.pnat_binding_attach(sw_if_index=t['sw_if_index'],
+ attachment=t['input'],
+ binding_index=rv.binding_index)
+
+ rv, l = self.vapi.pnat_bindings_get()
+ self.assertEqual(len(l), len(tests))
+
+ rv, l = self.vapi.pnat_interfaces_get()
+ self.assertEqual(len(l), 2)
+
+ self.logger.info(self.vapi.cli("show pnat translations"))
+ self.logger.info(self.vapi.cli("show pnat interfaces"))
+
+ for i, t in enumerate(tests):
+ self.vapi.pnat_binding_detach(sw_if_index=t['sw_if_index'],
+ attachment=t['input'],
+ binding_index=binding_index[i])
+ self.vapi.pnat_binding_del(binding_index=binding_index[i])
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_policer.py b/test/test_policer.py
new file mode 100644
index 00000000000..6b15a0234a3
--- /dev/null
+++ b/test/test_policer.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 Graphiant, Inc.
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_policer import VppPolicer, PolicerAction
+
+# Default for the tests is 10 s of "green" packets at 8 Mbps, i.e. 10M bytes.
+# The policer helper CLI "sends" 500-byte packets, so the default is 20000.
+
+TEST_RATE = 8000 # kbps
+TEST_BURST = 10000 # ms
+
+CIR_OK = 8500 # CIR in kbps, above test rate
+CIR_LOW = 7000 # CIR in kbps, below test rate
+EIR_OK = 9000 # EIR in kbps, above test rate
+EIR_LOW = 7500 # EIR in kbps, below test rate
+
+NUM_PKTS = 20000
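+# 8000 kbps = 1,000,000 bytes/s; over a 10 s burst that is 10,000,000 bytes,
+# or 20,000 packets of 500 bytes each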
+
+CBURST = 100000 # Committed burst in bytes
+EBURST = 200000 # Excess burst in bytes
+
+
+class TestPolicer(VppTestCase):
+ """ Policer Test Case """
+
+ def run_policer_test(self, type, cir, cb, eir, eb, rate=8000, burst=10000,
+ colour=0):
+ """
+ Configure a Policer and push traffic through it.
+ """
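+        # map test names to the policer 'type' values used by the API
+        # (2R3C = 3 is assumed to be the RFC 4115 two-rate variant)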
+ types = {
+ '1R2C': 0,
+ '1R3C': 1,
+ '2R3C': 3,
+ }
+
+ pol_type = types.get(type)
+ policer = VppPolicer(self, "pol1", cir, eir, cb, eb, rate_type=0,
+ type=pol_type, color_aware=colour)
+ policer.add_vpp_config()
+
+ error = self.vapi.cli(
+ f"test policing index {policer.policer_index} rate {rate} "
+ f"burst {burst} colour {colour}")
+
+ stats = policer.get_stats()
+ policer.remove_vpp_config()
+
+ return stats
+
+ def test_policer_1r2c(self):
+ """ Single rate, 2 colour policer """
+ stats = self.run_policer_test("1R2C", CIR_OK, CBURST, 0, 0)
+ self.assertEqual(stats['conform_packets'], NUM_PKTS)
+
+ stats = self.run_policer_test("1R2C", CIR_LOW, CBURST, 0, 0)
+ self.assertLess(stats['conform_packets'], NUM_PKTS)
+ self.assertEqual(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("1R2C", CIR_LOW, CBURST, 0, 0, colour=2)
+ self.assertEqual(stats['violate_packets'], NUM_PKTS)
+
+ def test_policer_1r3c(self):
+ """ Single rate, 3 colour policer """
+ stats = self.run_policer_test("1R3C", CIR_OK, CBURST, 0, 0)
+ self.assertEqual(stats['conform_packets'], NUM_PKTS)
+
+ stats = self.run_policer_test("1R3C", CIR_LOW, CBURST, 0, EBURST)
+ self.assertLess(stats['conform_packets'], NUM_PKTS)
+ self.assertGreater(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("1R3C", CIR_LOW, CBURST, 0, EBURST,
+ colour=1)
+ self.assertEqual(stats['conform_packets'], 0)
+ self.assertGreater(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("1R3C", CIR_LOW, CBURST, 0, EBURST,
+ colour=2)
+ self.assertEqual(stats['violate_packets'], NUM_PKTS)
+
+ def test_policer_2r3c(self):
+ """ Dual rate, 3 colour policer """
+ stats = self.run_policer_test("2R3C", CIR_OK, CBURST, EIR_OK, EBURST)
+ self.assertEqual(stats['conform_packets'], NUM_PKTS)
+
+ stats = self.run_policer_test("2R3C", CIR_LOW, CBURST, EIR_OK, EBURST)
+ self.assertLess(stats['conform_packets'], NUM_PKTS)
+ self.assertGreater(stats['exceed_packets'], 0)
+ self.assertEqual(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("2R3C", CIR_LOW, CBURST, EIR_LOW, EBURST)
+ self.assertLess(stats['conform_packets'], NUM_PKTS)
+ self.assertGreater(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("2R3C", CIR_LOW, CBURST, EIR_OK, EBURST,
+ colour=1)
+ self.assertEqual(stats['exceed_packets'], NUM_PKTS)
+
+ stats = self.run_policer_test("2R3C", CIR_LOW, CBURST, EIR_LOW, EBURST,
+ colour=1)
+ self.assertEqual(stats['conform_packets'], 0)
+ self.assertGreater(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ stats = self.run_policer_test("2R3C", CIR_LOW, CBURST, EIR_OK, EBURST,
+ colour=2)
+ self.assertEqual(stats['violate_packets'], NUM_PKTS)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_policer_input.py b/test/test_policer_input.py
new file mode 100644
index 00000000000..c95f6643ff2
--- /dev/null
+++ b/test/test_policer_input.py
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 Graphiant, Inc.
+
+import unittest
+import scapy.compat
+from scapy.layers.inet import IP, UDP
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from framework import VppTestCase, VppTestRunner
+from vpp_papi import VppEnum
+from vpp_policer import VppPolicer, PolicerAction
+
+NUM_PKTS = 67
+
+
+class TestPolicerInput(VppTestCase):
+ """ Policer on an input interface """
+ vpp_worker_count = 2
+
+ def setUp(self):
+ super(TestPolicerInput, self).setUp()
+
+ self.create_pg_interfaces(range(2))
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ self.pkt = (Ether(src=self.pg0.remote_mac,
+ dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.admin_down()
+ super(TestPolicerInput, self).tearDown()
+
+ def test_policer_input(self):
+ """ Input Policing """
+ pkts = self.pkt * NUM_PKTS
+
+ action_tx = PolicerAction(
+ VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT,
+ 0)
+ policer = VppPolicer(self, "pol1", 80, 0, 1000, 0,
+ conform_action=action_tx,
+ exceed_action=action_tx,
+ violate_action=action_tx)
+ policer.add_vpp_config()
+
+ # Start policing on pg0
+ policer.apply_vpp_config(self.pg0.sw_if_index, True)
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
+ stats = policer.get_stats()
+
+ # Single rate, 2 colour policer - expect conform, violate but no exceed
+ self.assertGreater(stats['conform_packets'], 0)
+ self.assertEqual(stats['exceed_packets'], 0)
+ self.assertGreater(stats['violate_packets'], 0)
+
+ # Stop policing on pg0
+ policer.apply_vpp_config(self.pg0.sw_if_index, False)
+
+ rx = self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
+
+ statsnew = policer.get_stats()
+
+ # No new packets counted
+ self.assertEqual(stats, statsnew)
+
+ policer.remove_vpp_config()
+
+ def test_policer_handoff(self):
+ """ Worker thread handoff """
+ pkts = self.pkt * NUM_PKTS
+
+ action_tx = PolicerAction(
+ VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT,
+ 0)
+ policer = VppPolicer(self, "pol2", 80, 0, 1000, 0,
+ conform_action=action_tx,
+ exceed_action=action_tx,
+ violate_action=action_tx)
+ policer.add_vpp_config()
+
+ # Bind the policer to worker 1
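+        # packets received on either worker should then be handed off to
+        # worker 1 for policing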
+ policer.bind_vpp_config(1, True)
+
+ # Start policing on pg0
+ policer.apply_vpp_config(self.pg0.sw_if_index, True)
+
+ for worker in [0, 1]:
+ self.send_and_expect(self.pg0, pkts, self.pg1, worker=worker)
+ self.logger.debug(self.vapi.cli("show trace max 100"))
+
+ stats = policer.get_stats()
+ stats0 = policer.get_stats(worker=0)
+ stats1 = policer.get_stats(worker=1)
+
+ # Worker 1, should have done all the policing
+ self.assertEqual(stats, stats1)
+
+ # Worker 0, should have handed everything off
+ self.assertEqual(stats0['conform_packets'], 0)
+ self.assertEqual(stats0['exceed_packets'], 0)
+ self.assertEqual(stats0['violate_packets'], 0)
+
+ # Unbind the policer from worker 1 and repeat
+ policer.bind_vpp_config(1, False)
+ for worker in [0, 1]:
+ self.send_and_expect(self.pg0, pkts, self.pg1, worker=worker)
+ self.logger.debug(self.vapi.cli("show trace max 100"))
+
+        # The policer should auto-bind to worker 0 when packets arrive,
+        # so both workers should now have policed packets
+        stats = policer.get_stats()
+ stats0 = policer.get_stats(worker=0)
+ stats1 = policer.get_stats(worker=1)
+
+ self.assertGreater(stats0['conform_packets'], 0)
+ self.assertEqual(stats0['exceed_packets'], 0)
+ self.assertGreater(stats0['violate_packets'], 0)
+
+ self.assertGreater(stats1['conform_packets'], 0)
+ self.assertEqual(stats1['exceed_packets'], 0)
+ self.assertGreater(stats1['violate_packets'], 0)
+
+ self.assertEqual(stats0['conform_packets'] + stats1['conform_packets'],
+ stats['conform_packets'])
+
+ self.assertEqual(stats0['violate_packets'] + stats1['violate_packets'],
+ stats['violate_packets'])
+
+ # Stop policing on pg0
+ policer.apply_vpp_config(self.pg0.sw_if_index, False)
+
+ policer.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_pppoe.py b/test/test_pppoe.py
new file mode 100644
index 00000000000..99dba01cdc9
--- /dev/null
+++ b/test/test_pppoe.py
@@ -0,0 +1,611 @@
+#!/usr/bin/env python3
+
+import socket
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.ppp import PPPoE, PPPoED, PPP
+from scapy.layers.inet import IP
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_pppoe_interface import VppPppoeInterface
+from util import ppp, ppc
+
+
+class TestPPPoE(VppTestCase):
+ """ PPPoE Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestPPPoE, cls).setUpClass()
+
+ cls.session_id = 1
+ cls.dst_ip = "100.1.1.100"
+ cls.dst_ipn = socket.inet_pton(socket.AF_INET, cls.dst_ip)
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestPPPoE, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestPPPoE, self).setUp()
+
+        # create 3 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ def tearDown(self):
+ super(TestPPPoE, self).tearDown()
+
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.admin_down()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show int"))
+ self.logger.info(self.vapi.cli("show pppoe fib"))
+ self.logger.info(self.vapi.cli("show pppoe session"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+ self.logger.info(self.vapi.cli("show trace"))
+
+ def create_stream_pppoe_discovery(self, src_if, dst_if,
+ client_mac, count=1):
+ packets = []
+ for i in range(count):
+ # create packet info stored in the test case instance
+ info = self.create_packet_info(src_if, dst_if)
+ # convert the info into packet payload
+ payload = self.info_to_payload(info)
+ # create the packet itself
+ p = (Ether(dst=src_if.local_mac, src=client_mac) /
+ PPPoED(sessionid=0) /
+ Raw(payload))
+ # store a copy of the packet in the packet info
+ info.data = p.copy()
+ # append the packet to the list
+ packets.append(p)
+
+ # return the created packet list
+ return packets
+
+ def create_stream_pppoe_lcp(self, src_if, dst_if,
+ client_mac, session_id, count=1):
+ packets = []
+ for i in range(count):
+ # create packet info stored in the test case instance
+ info = self.create_packet_info(src_if, dst_if)
+ # convert the info into packet payload
+ payload = self.info_to_payload(info)
+ # create the packet itself
+ p = (Ether(dst=src_if.local_mac, src=client_mac) /
+ PPPoE(sessionid=session_id) /
+ PPP(proto=0xc021) /
+ Raw(payload))
+ # store a copy of the packet in the packet info
+ info.data = p.copy()
+ # append the packet to the list
+ packets.append(p)
+
+ # return the created packet list
+ return packets
+
+ def create_stream_pppoe_ip4(self, src_if, dst_if,
+ client_mac, session_id, client_ip, count=1):
+ packets = []
+ for i in range(count):
+ # create packet info stored in the test case instance
+ info = self.create_packet_info(src_if, dst_if)
+ # convert the info into packet payload
+ payload = self.info_to_payload(info)
+ # create the packet itself
+ p = (Ether(dst=src_if.local_mac, src=client_mac) /
+ PPPoE(sessionid=session_id) /
+ PPP(proto=0x0021) /
+ IP(src=client_ip, dst=self.dst_ip) /
+ Raw(payload))
+ # store a copy of the packet in the packet info
+ info.data = p.copy()
+ # append the packet to the list
+ packets.append(p)
+
+ # return the created packet list
+ return packets
+
+ def create_stream_ip4(self, src_if, dst_if, client_ip, dst_ip, count=1):
+ pkts = []
+ for i in range(count):
+ # create packet info stored in the test case instance
+ info = self.create_packet_info(src_if, dst_if)
+ # convert the info into packet payload
+ payload = self.info_to_payload(info)
+ # create the packet itself
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=dst_ip, dst=client_ip) /
+ Raw(payload))
+ # store a copy of the packet in the packet info
+ info.data = p.copy()
+ # append the packet to the list
+ pkts.append(p)
+
+ # return the created packet list
+ return pkts
+
+ def verify_decapped_pppoe(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_encaped_pppoe(self, src_if, capture, sent, session_id):
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+
+ rx_pppoe = rx[PPPoE]
+
+ self.assertEqual(rx_pppoe.sessionid, session_id)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def test_PPPoE_Decap(self):
+ """ PPPoE Decap Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session
+ pppoe_if = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if.add_vpp_config()
+ pppoe_if.set_unnumbered(self.pg0.sw_if_index)
+
+ #
+ # Send tunneled packets that match the created tunnel and
+ # are decapped and forwarded
+ #
+ tx2 = self.create_stream_pppoe_ip4(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id,
+ self.pg0.remote_ip4)
+ self.pg0.add_stream(tx2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx2 = self.pg1.get_capture(len(tx2))
+ self.verify_decapped_pppoe(self.pg0, rx2, tx2)
+
+ self.logger.info(self.vapi.cli("show pppoe fib"))
+ self.logger.info(self.vapi.cli("show pppoe session"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+
+ #
+ # test case cleanup
+ #
+
+ # Delete PPPoE session
+ pppoe_if.remove_vpp_config()
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+ def test_PPPoE_Encap(self):
+ """ PPPoE Encap Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session
+ pppoe_if = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if.add_vpp_config()
+ pppoe_if.set_unnumbered(self.pg0.sw_if_index)
+
+ #
+ # Send a packet stream that is routed into the session
+ # - packets are PPPoE encapped
+ #
+ self.vapi.cli("clear trace")
+ tx2 = self.create_stream_ip4(self.pg1, self.pg0,
+ self.pg0.remote_ip4, self.dst_ip, 65)
+ self.pg1.add_stream(tx2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx2 = self.pg0.get_capture(len(tx2))
+ self.verify_encaped_pppoe(self.pg1, rx2, tx2, self.session_id)
+
+ self.logger.info(self.vapi.cli("show pppoe fib"))
+ self.logger.info(self.vapi.cli("show pppoe session"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+ self.logger.info(self.vapi.cli("show adj"))
+
+ #
+ # test case cleanup
+ #
+
+ # Delete PPPoE session
+ pppoe_if.remove_vpp_config()
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+ def test_PPPoE_Add_Twice(self):
+ """ PPPoE Add Same Session Twice Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session
+ pppoe_if = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if.add_vpp_config()
+ pppoe_if.set_unnumbered(self.pg0.sw_if_index)
+
+ #
+ # The double create (create the same session twice) should fail,
+ # and we should still be able to use the original
+ #
+ try:
+ pppoe_if.add_vpp_config()
+ except Exception:
+ pass
+ else:
+            self.fail("Double PPPoE session add does not fail")
+
+ #
+ # test case cleanup
+ #
+
+ # Delete PPPoE session
+ pppoe_if.remove_vpp_config()
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+ def test_PPPoE_Del_Twice(self):
+ """ PPPoE Delete Same Session Twice Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session
+ pppoe_if = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if.add_vpp_config()
+
+ # Delete PPPoE session
+ pppoe_if.remove_vpp_config()
+
+ #
+ # The double del (del the same session twice) should fail,
+ # and we should still be able to use the original
+ #
+ try:
+ pppoe_if.remove_vpp_config()
+ except Exception:
+ pass
+ else:
+            self.fail("Double PPPoE session del does not fail")
+
+ #
+ # test case cleanup
+ #
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+ def test_PPPoE_Decap_Multiple(self):
+ """ PPPoE Decap Multiple Sessions Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery 1
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP 1
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session 1
+ pppoe_if1 = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if1.add_vpp_config()
+ pppoe_if1.set_unnumbered(self.pg0.sw_if_index)
+
+ # Send PPPoE Discovery 2
+ tx3 = self.create_stream_pppoe_discovery(self.pg2, self.pg1,
+ self.pg2.remote_mac)
+ self.pg2.add_stream(tx3)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP 2
+ tx4 = self.create_stream_pppoe_lcp(self.pg2, self.pg1,
+ self.pg2.remote_mac,
+ self.session_id + 1)
+ self.pg2.add_stream(tx4)
+ self.pg_start()
+
+ # Create PPPoE session 2
+ pppoe_if2 = VppPppoeInterface(self,
+ self.pg2.remote_ip4,
+ self.pg2.remote_mac,
+ self.session_id + 1)
+ pppoe_if2.add_vpp_config()
+ pppoe_if2.set_unnumbered(self.pg0.sw_if_index)
+
+ #
+ # Send tunneled packets that match the created tunnel and
+ # are decapped and forwarded
+ #
+ tx2 = self.create_stream_pppoe_ip4(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id,
+ self.pg0.remote_ip4)
+ self.pg0.add_stream(tx2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx2 = self.pg1.get_capture(len(tx2))
+ self.verify_decapped_pppoe(self.pg0, rx2, tx2)
+
+ tx5 = self.create_stream_pppoe_ip4(self.pg2, self.pg1,
+ self.pg2.remote_mac,
+ self.session_id + 1,
+ self.pg2.remote_ip4)
+ self.pg2.add_stream(tx5)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx5 = self.pg1.get_capture(len(tx5))
+ self.verify_decapped_pppoe(self.pg2, rx5, tx5)
+
+ self.logger.info(self.vapi.cli("show pppoe fib"))
+ self.logger.info(self.vapi.cli("show pppoe session"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+
+ #
+ # test case cleanup
+ #
+
+ # Delete PPPoE session
+ pppoe_if1.remove_vpp_config()
+ pppoe_if2.remove_vpp_config()
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+ def test_PPPoE_Encap_Multiple(self):
+ """ PPPoE Encap Multiple Sessions Test """
+
+ self.vapi.cli("clear trace")
+
+ #
+ # Add a route that resolves the server's destination
+ #
+ route_sever_dst = VppIpRoute(self, "100.1.1.100", 32,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_sever_dst.add_vpp_config()
+
+ # Send PPPoE Discovery 1
+ tx0 = self.create_stream_pppoe_discovery(self.pg0, self.pg1,
+ self.pg0.remote_mac)
+ self.pg0.add_stream(tx0)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP 1
+ tx1 = self.create_stream_pppoe_lcp(self.pg0, self.pg1,
+ self.pg0.remote_mac,
+ self.session_id)
+ self.pg0.add_stream(tx1)
+ self.pg_start()
+
+ # Create PPPoE session 1
+ pppoe_if1 = VppPppoeInterface(self,
+ self.pg0.remote_ip4,
+ self.pg0.remote_mac,
+ self.session_id)
+ pppoe_if1.add_vpp_config()
+
+ # Send PPPoE Discovery 2
+ tx3 = self.create_stream_pppoe_discovery(self.pg2, self.pg1,
+ self.pg2.remote_mac)
+ self.pg2.add_stream(tx3)
+ self.pg_start()
+
+ # Send PPPoE PPP LCP 2
+ tx4 = self.create_stream_pppoe_lcp(self.pg2, self.pg1,
+ self.pg2.remote_mac,
+ self.session_id + 1)
+ self.pg2.add_stream(tx4)
+ self.pg_start()
+
+ # Create PPPoE session 2
+ pppoe_if2 = VppPppoeInterface(self,
+ self.pg2.remote_ip4,
+ self.pg2.remote_mac,
+ self.session_id + 1)
+ pppoe_if2.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the session
+ # - packets are PPPoE encapped
+ #
+ self.vapi.cli("clear trace")
+ tx2 = self.create_stream_ip4(self.pg1, self.pg0,
+ self.pg0.remote_ip4, self.dst_ip)
+ self.pg1.add_stream(tx2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx2 = self.pg0.get_capture(len(tx2))
+ self.verify_encaped_pppoe(self.pg1, rx2, tx2, self.session_id)
+
+ tx5 = self.create_stream_ip4(self.pg1, self.pg2,
+ self.pg2.remote_ip4, self.dst_ip)
+ self.pg1.add_stream(tx5)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx5 = self.pg2.get_capture(len(tx5))
+ self.verify_encaped_pppoe(self.pg1, rx5, tx5, self.session_id + 1)
+
+ self.logger.info(self.vapi.cli("show pppoe fib"))
+ self.logger.info(self.vapi.cli("show pppoe session"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+
+ #
+ # test case cleanup
+ #
+
+ # Delete PPPoE session
+ pppoe_if1.remove_vpp_config()
+ pppoe_if2.remove_vpp_config()
+
+ # Delete a route that resolves the server's destination
+ route_sever_dst.remove_vpp_config()
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_quic.py b/test/test_quic.py
new file mode 100644
index 00000000000..1257f4e2b0a
--- /dev/null
+++ b/test/test_quic.py
@@ -0,0 +1,554 @@
+#!/usr/bin/env python3
+""" Vpp QUIC tests """
+
+import unittest
+import os
+import subprocess
+import signal
+from framework import tag_fixme_vpp_workers
+from framework import VppTestCase, VppTestRunner, running_extended_tests, \
+ Worker
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+
+
+class QUICAppWorker(Worker):
+ """ QUIC Test Application Worker """
+ process = None
+
+ def __init__(self, build_dir, appname, executable_args, logger, role,
+ testcase, env=None, *args, **kwargs):
+ if env is None:
+ env = {}
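+        # the test application binary (e.g. vpp_echo) is expected under
+        # <build_dir>/vpp/bin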
+ app = "%s/vpp/bin/%s" % (build_dir, appname)
+ self.args = [app] + executable_args
+ self.role = role
+ self.wait_for_gdb = 'wait-for-gdb'
+ self.testcase = testcase
+ super(QUICAppWorker, self).__init__(self.args, logger, env,
+ *args, **kwargs)
+
+ def run(self):
+ super(QUICAppWorker, self).run()
+
+ def teardown(self, logger, timeout):
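+        # best-effort kill of the worker's process group; returns True only
+        # if the kill attempt itself failed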
+ if self.process is None:
+ return False
+ try:
+ logger.debug("Killing worker process (pid %d)" % self.process.pid)
+ os.killpg(os.getpgid(self.process.pid), signal.SIGKILL)
+ self.join(timeout)
+ except OSError as e:
+            logger.debug("Couldn't kill worker process: %s" % e)
+ return True
+ return False
+
+
+class QUICTestCase(VppTestCase):
+ """ QUIC Test Case """
+
+ timeout = 20
+ pre_test_sleep = 0.3
+ post_test_sleep = 0.3
+
+ @classmethod
+ def setUpClass(cls):
+ cls.extra_vpp_plugin_config.append("plugin quic_plugin.so { enable }")
+ super(QUICTestCase, cls).setUpClass()
+
+ def setUp(self):
+ super(QUICTestCase, self).setUp()
+ var = "VPP_BUILD_DIR"
+ self.build_dir = os.getenv(var, None)
+ if self.build_dir is None:
+ raise Exception("Environment variable `%s' not set" % var)
+ self.vppDebug = 'vpp_debug' in self.build_dir
+
+ self.create_loopback_interfaces(2)
+ self.uri = "quic://%s/1234" % self.loop0.local_ip4
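+        # each loopback gets its own FIB table (loop0 -> 1, loop1 -> 2), so
+        # the client and server namespaces only reach each other via the
+        # inter-table routes added below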
+ table_id = 1
+ for i in self.lo_interfaces:
+ i.admin_up()
+
+ if table_id != 0:
+ tbl = VppIpTable(self, table_id)
+ tbl.add_vpp_config()
+
+ i.set_table_ip4(table_id)
+ i.config_ip4()
+ table_id += 1
+
+ # Configure namespaces
+ self.vapi.app_namespace_add_del(namespace_id="server",
+ sw_if_index=self.loop0.sw_if_index)
+ self.vapi.app_namespace_add_del(namespace_id="client",
+ sw_if_index=self.loop1.sw_if_index)
+
+ # Add inter-table routes
+ self.ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=2)], table_id=1)
+ self.ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
+ [VppRoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=1)], table_id=2)
+ self.ip_t01.add_vpp_config()
+ self.ip_t10.add_vpp_config()
+ self.logger.debug(self.vapi.cli("show ip fib"))
+
+ def tearDown(self):
+ # Delete inter-table routes
+ self.ip_t01.remove_vpp_config()
+ self.ip_t10.remove_vpp_config()
+
+ for i in self.lo_interfaces:
+ i.unconfig_ip4()
+ i.set_table_ip4(0)
+ i.admin_down()
+ super(QUICTestCase, self).tearDown()
+
+
+class QUICEchoIntTestCase(QUICTestCase):
+ """QUIC Echo Internal Test Case"""
+ test_bytes = ' test-bytes'
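+    # enable the session layer and poll its event queues on the main thread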
+ extra_vpp_punt_config = ["session", "{", "enable", "poll-main", "}"]
+
+ def setUp(self):
+ super(QUICEchoIntTestCase, self).setUp()
+ self.client_args = 'uri {uri} fifo-size 64{testbytes} appns client' \
+ .format(uri=self.uri, testbytes=self.test_bytes)
+ self.server_args = "uri %s fifo-size 64 appns server" % self.uri
+
+ def tearDown(self):
+ super(QUICEchoIntTestCase, self).tearDown()
+
+ def server(self, *args):
+ error = self.vapi.cli(
+ "test echo server %s %s" %
+ (self.server_args, ' '.join(args)))
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("failed", error)
+
+ def client(self, *args):
+ error = self.vapi.cli(
+ "test echo client %s %s" %
+ (self.client_args, ' '.join(args)))
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("failed", error)
+
+
+@tag_fixme_vpp_workers
+class QUICEchoIntTransferTestCase(QUICEchoIntTestCase):
+ """QUIC Echo Internal Transfer Test Case"""
+ def test_quic_int_transfer(self):
+ """QUIC internal transfer"""
+ self.server()
+ self.client("no-output", "mbytes", "2")
+
+
+@tag_fixme_vpp_workers
+class QUICEchoIntSerialTestCase(QUICEchoIntTestCase):
+ """QUIC Echo Internal Serial Transfer Test Case"""
+ def test_quic_serial_int_transfer(self):
+ """QUIC serial internal transfer"""
+ self.server()
+ self.client("no-output", "mbytes", "2")
+ self.client("no-output", "mbytes", "2")
+ self.client("no-output", "mbytes", "2")
+ self.client("no-output", "mbytes", "2")
+ self.client("no-output", "mbytes", "2")
+
+
+@tag_fixme_vpp_workers
+class QUICEchoIntMStreamTestCase(QUICEchoIntTestCase):
+ """QUIC Echo Internal MultiStream Test Case"""
+ def test_quic_int_multistream_transfer(self):
+ """QUIC internal multi-stream transfer"""
+ self.server()
+ self.client("nclients", "10", "mbytes", "1", "no-output")
+
+
+class QUICEchoExtTestCase(QUICTestCase):
+ quic_setup = "default"
+ test_bytes = "test-bytes:assert"
+ pre_test_sleep = 1
+ post_test_sleep = 1
+ app = "vpp_echo"
+ evt_q_len = 16384
+ vpp_worker_count = 1
+ server_fifo_size = "1M"
+ client_fifo_size = "4M"
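+    # session layer configuration for the external vpp_echo tests: enable it
+    # with main-thread polling and size the event queues and session lookup
+    # tables for the transfer load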
+ extra_vpp_punt_config = ["session", "{",
+ "enable", "poll-main", "evt_qs_memfd_seg",
+ "evt_qs_seg_size", "64M",
+ "event-queue-length", f"{evt_q_len}",
+ "preallocated-sessions", "1024",
+ "v4-session-table-buckets", "20000",
+ "v4-session-table-memory", "64M",
+ "v4-halfopen-table-buckets", "20000",
+ "v4-halfopen-table-memory", "64M",
+ "local-endpoints-table-buckets", "250000",
+ "local-endpoints-table-memory", "512M",
+ "}"]
+
+ def setUp(self):
+ super(QUICEchoExtTestCase, self).setUp()
+ common_args = [
+ "uri", self.uri,
+ "json",
+ self.test_bytes,
+ "socket-name", self.get_api_sock_path(),
+ "quic-setup", self.quic_setup,
+ "nthreads", "1",
+ "mq-size", f"{self.evt_q_len}"
+ ]
+ self.server_echo_test_args = common_args + \
+ ["server", "appns", "server", "fifo-size",
+ f"{self.server_fifo_size}"]
+ self.client_echo_test_args = common_args + \
+ ["client", "appns", "client", "fifo-size",
+ f"{self.client_fifo_size}"]
+ error = self.vapi.cli("quic set fifo-size 2M")
+ if error:
+ self.logger.critical(error)
+ self.assertNotIn("failed", error)
+
+ def server(self, *args):
+ _args = self.server_echo_test_args + list(args)
+ self.worker_server = QUICAppWorker(
+ self.build_dir,
+ self.app,
+ _args,
+ self.logger,
+ 'server',
+ self)
+ self.worker_server.start()
+ self.sleep(self.pre_test_sleep)
+
+ def client(self, *args):
+ _args = self.client_echo_test_args + list(args)
+ self.worker_client = QUICAppWorker(
+ self.build_dir,
+ self.app,
+ _args,
+ self.logger,
+ 'client',
+ self)
+ self.worker_client.start()
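+        # when debugging, wait for the client worker without a timeout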
+ timeout = None if self.debug_all else self.timeout
+ self.worker_client.join(timeout)
+ if self.worker_client.is_alive():
+ error = f"Client failed to complete in {timeout} seconds!"
+ self.logger.critical(error)
+ return
+ self.worker_server.join(timeout)
+ if self.worker_server.is_alive():
+ error = f"Server failed to complete in {timeout} seconds!"
+ self.logger.critical(error)
+ self.sleep(self.post_test_sleep)
+
+ def validate_ext_test_results(self):
+ server_result = self.worker_server.result
+ client_result = self.worker_client.result
+ self.logger.info("Server worker result is `%s'" %
+ server_result)
+ self.logger.info("Client worker result is `%s'" %
+ client_result)
+ server_kill_error = False
+ if self.worker_server.result is None:
+ server_kill_error = self.worker_server.teardown(
+ self.logger, self.timeout)
+ if self.worker_client.result is None:
+ self.worker_client.teardown(self.logger, self.timeout)
+ err_msg = "Wrong server worker return code (%s)" % server_result
+ self.assertEqual(server_result, 0, err_msg)
+ self.assertIsNotNone(
+ client_result,
+ "Timeout! Client worker did not finish in %ss" %
+ self.timeout)
+ err_msg = "Wrong client worker return code (%s)" % client_result
+ self.assertEqual(client_result, 0, err_msg)
+ self.assertFalse(server_kill_error, "Server kill errored")
+
+
+class QUICEchoExtTransferTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Test Case"""
+ timeout = 60
+
+ def test_quic_ext_transfer(self):
+ """QUIC external transfer"""
+ self.server()
+ self.client()
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtTransferBigTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Big Test Case"""
+ server_fifo_size = '4M'
+ client_fifo_size = '4M'
+ test_bytes = ''
+ timeout = 60
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_quic_ext_transfer_big(self):
+ """QUIC external transfer, big stream"""
+ self.server("TX=0", "RX=2G")
+ self.client("TX=2G", "RX=0")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtQcloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Qclose Rx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_qclose_rx(self):
+ """QUIC external transfer, rx close"""
+ self.server("TX=0", "RX=10M", "qclose=Y", "sclose=N")
+ self.client("TX=10M", "RX=0", "qclose=W", "sclose=W")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtQcloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Qclose Tx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_qclose_tx(self):
+ """QUIC external transfer, tx close"""
+ self.server("TX=0", "RX=10M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.client("TX=10M", "RX=0", "qclose=Y", "sclose=N")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtEarlyQcloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Early Qclose Rx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_early_qclose_rx(self):
+ """QUIC external transfer, early rx close"""
+ self.server("TX=0", "RX=10M", "qclose=Y", "sclose=N")
+ self.client("TX=20M", "RX=0", "qclose=W", "sclose=W",
+ "tx-results-diff")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtEarlyQcloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Early Qclose Tx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_early_qclose_tx(self):
+ """QUIC external transfer, early tx close"""
+ self.server("TX=0", "RX=20M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.client("TX=10M", "RX=0", "qclose=Y", "sclose=N")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtScloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Sclose Rx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_sclose_rx(self):
+ """QUIC external transfer, rx stream close"""
+ self.server("TX=0", "RX=10M", "qclose=N", "sclose=Y")
+ self.client("TX=10M", "RX=0", "qclose=W", "sclose=W")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtScloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Sclose Tx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_sclose_tx(self):
+ """QUIC external transfer, tx stream close"""
+ self.server("TX=0", "RX=10M", "qclose=W", "sclose=W")
+ self.client("TX=10M", "RX=0", "qclose=Y", "sclose=Y")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtEarlyScloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Early Sclose Rx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_early_sclose_rx(self):
+ """QUIC external transfer, early rx stream close"""
+ self.server("TX=0", "RX=10M", "qclose=N", "sclose=Y")
+ self.client("TX=20M", "RX=0", "qclose=W", "sclose=W",
+ "tx-results-diff")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtEarlyScloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Early Sclose Tx Test Case"""
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_early_sclose_tx(self):
+ """QUIC external transfer, early tx stream close"""
+ self.server("TX=0", "RX=20M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.client("TX=10M", "RX=0", "qclose=Y", "sclose=Y")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Test Case"""
+ quic_setup = "serverstream"
+ timeout = 60
+
+ def test_quic_ext_transfer_server_stream(self):
+ """QUIC external server transfer"""
+ self.server("TX=10M", "RX=0")
+ self.client("TX=0", "RX=10M")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamBigTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Big Test Case"""
+ quic_setup = "serverstream"
+ server_fifo_size = '4M'
+ client_fifo_size = '4M'
+ test_bytes = ''
+ timeout = 60
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ def test_quic_ext_transfer_server_stream_big(self):
+ """QUIC external server transfer, big stream"""
+ self.server("TX=2G", "RX=0")
+ self.client("TX=0", "RX=2G")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamQcloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Qclose Rx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_qclose_rx(self):
+ """QUIC external server transfer, rx close"""
+ self.server("TX=10M", "RX=0", "qclose=W", "sclose=W")
+ self.client("TX=0", "RX=10M", "qclose=Y", "sclose=N")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamQcloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Qclose Tx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_qclose_tx(self):
+ """QUIC external server transfer, tx close"""
+ self.server("TX=10M", "RX=0", "qclose=Y", "sclose=N")
+ self.client("TX=0", "RX=10M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamEarlyQcloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Early Qclose Rx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_early_qclose_rx(self):
+ """QUIC external server transfer, early rx close"""
+ self.server("TX=20M", "RX=0", "qclose=W", "sclose=W",
+ "tx-results-diff")
+ self.client("TX=0", "RX=10M", "qclose=Y", "sclose=N")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamEarlyQcloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Early Qclose Tx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_early_qclose_tx(self):
+ """QUIC external server transfer, early tx close"""
+ self.server("TX=10M", "RX=0", "qclose=Y", "sclose=N")
+ self.client("TX=0", "RX=20M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamScloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Sclose Rx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_sclose_rx(self):
+ """QUIC external server transfer, rx stream close"""
+ self.server("TX=10M", "RX=0", "qclose=W", "sclose=W")
+ self.client("TX=0", "RX=10M", "qclose=N", "sclose=Y")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamScloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Sclose Tx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_sclose_tx(self):
+ """QUIC external server transfer, tx stream close"""
+ self.server("TX=10M", "RX=0", "qclose=Y", "sclose=Y")
+ self.client("TX=0", "RX=10M", "qclose=W", "sclose=W")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamEarlyScloseRxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream Early Sclose Rx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_early_sclose_rx(self):
+ """QUIC external server transfer, early rx stream close"""
+ self.server("TX=20M", "RX=0", "qclose=W", "sclose=W",
+ "tx-results-diff")
+ self.client("TX=0", "RX=10M", "qclose=N", "sclose=Y")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamEarlyScloseTxTestCase(QUICEchoExtTestCase):
+ """QUIC Echo Ext Transfer Server Stream Early Sclose Tx Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_server_stream_early_sclose_tx(self):
+ """QUIC external server transfer, early tx stream close"""
+ self.server("TX=10M", "RX=0", "qclose=Y", "sclose=Y")
+ self.client("TX=0", "RX=20M", "qclose=W", "sclose=W",
+ "rx-results-diff")
+ self.validate_ext_test_results()
+
+
+class QUICEchoExtServerStreamWorkersTestCase(QUICEchoExtTestCase):
+ """QUIC Echo External Transfer Server Stream MultiWorker Test Case"""
+ quic_setup = "serverstream"
+
+ @unittest.skipUnless(running_extended_tests, "part of extended tests")
+ @unittest.skip("testcase under development")
+ def test_quic_ext_transfer_server_stream_multi_workers(self):
+ """QUIC external server transfer, multi-worker"""
+ self.server("nclients", "4", "quic-streams", "4", "TX=10M", "RX=0")
+ self.client("nclients", "4", "quic-streams", "4", "TX=0", "RX=10M")
+ self.validate_ext_test_results()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_srv6.py b/test/test_srv6.py
new file mode 100644
index 00000000000..449ad59ac60
--- /dev/null
+++ b/test/test_srv6.py
@@ -0,0 +1,2147 @@
+#!/usr/bin/env python3
+
+import unittest
+import binascii
+from socket import AF_INET6
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto, VppIpTable
+from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
+ SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+
+
+class TestSRv6(VppTestCase):
+ """ SRv6 Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSRv6, cls).tearDownClass()
+
+ def setUp(self):
+ """ Perform test setup before each test case.
+ """
+ super(TestSRv6, self).setUp()
+
+        # packet sizes, including L2 overhead
+ self.pg_packet_sizes = [64, 512, 1518, 9018]
+
+ # reset packet_infos
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """ Clean up test setup after each test case.
+ """
+ self.teardown_interfaces()
+
+ super(TestSRv6, self).tearDown()
+
+ def configure_interface(self,
+ interface,
+ ipv6=False, ipv4=False,
+ ipv6_table_id=0, ipv4_table_id=0):
+ """ Configure interface.
+ :param ipv6: configure IPv6 on interface
+ :param ipv4: configure IPv4 on interface
+ :param ipv6_table_id: FIB table_id for IPv6
+ :param ipv4_table_id: FIB table_id for IPv4
+ """
+ self.logger.debug("Configuring interface %s" % (interface.name))
+ if ipv6:
+ self.logger.debug("Configuring IPv6")
+ interface.set_table_ip6(ipv6_table_id)
+ interface.config_ip6()
+ interface.resolve_ndp(timeout=5)
+ if ipv4:
+ self.logger.debug("Configuring IPv4")
+ interface.set_table_ip4(ipv4_table_id)
+ interface.config_ip4()
+ interface.resolve_arp()
+ interface.admin_up()
+
+ def setup_interfaces(self, ipv6=[], ipv4=[],
+ ipv6_table_id=[], ipv4_table_id=[]):
+ """ Create and configure interfaces.
+
+ :param ipv6: list of interface IPv6 capabilities
+ :param ipv4: list of interface IPv4 capabilities
+ :param ipv6_table_id: list of intf IPv6 FIB table_ids
+ :param ipv4_table_id: list of intf IPv4 FIB table_ids
+ :returns: List of created interfaces.
+ """
+ # how many interfaces?
+ if len(ipv6):
+ count = len(ipv6)
+ else:
+ count = len(ipv4)
+ self.logger.debug("Creating and configuring %d interfaces" % (count))
+
+ # fill up ipv6 and ipv4 lists if needed
+ # not enabled (False) is the default
+ if len(ipv6) < count:
+ ipv6 += (count - len(ipv6)) * [False]
+ if len(ipv4) < count:
+ ipv4 += (count - len(ipv4)) * [False]
+
+ # fill up table_id lists if needed
+ # table_id 0 (global) is the default
+ if len(ipv6_table_id) < count:
+ ipv6_table_id += (count - len(ipv6_table_id)) * [0]
+ if len(ipv4_table_id) < count:
+ ipv4_table_id += (count - len(ipv4_table_id)) * [0]
+
+ # create 'count' pg interfaces
+ self.create_pg_interfaces(range(count))
+
+ # setup all interfaces
+ for i in range(count):
+ intf = self.pg_interfaces[i]
+ self.configure_interface(intf,
+ ipv6[i], ipv4[i],
+ ipv6_table_id[i], ipv4_table_id[i])
+
+ if any(ipv6):
+ self.logger.debug(self.vapi.cli("show ip6 neighbors"))
+ if any(ipv4):
+ self.logger.debug(self.vapi.cli("show ip4 neighbors"))
+ self.logger.debug(self.vapi.cli("show interface"))
+ self.logger.debug(self.vapi.cli("show hardware"))
+
+ return self.pg_interfaces
+
+ def teardown_interfaces(self):
+ """ Unconfigure and bring down interface.
+ """
+ self.logger.debug("Tearing down interfaces")
+ # tear down all interfaces
+ # AFAIK they cannot be deleted
+ for i in self.pg_interfaces:
+ self.logger.debug("Tear down interface %s" % (i.name))
+ i.admin_down()
+ i.unconfig()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+
+ @unittest.skipUnless(0, "PC to fix")
+ def test_SRv6_T_Encaps(self):
+ """ Test SRv6 Transit.Encaps behavior for IPv6.
+ """
+ # send traffic to one destination interface
+ # source and destination are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure encaps IPv6 source address
+ # needs to be done before SR Policy config
+ # TODO: API?
+ self.vapi.cli("set sr encaps source addr a3::")
+
+ bsid = 'a3::9999:1'
+ # configure SRv6 Policy
+ # Note: segment list order: first -> last
+ sr_policy = VppSRv6Policy(
+ self, bsid=bsid,
+ is_encap=1,
+ sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
+ weight=1, fib_table=0,
+ segments=['a4::', 'a5::', 'a6::c7'],
+ source='a3::')
+ sr_policy.add_vpp_config()
+ self.sr_policy = sr_policy
+
+ # log the sr policies
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # steer IPv6 traffic to a7::/64 into SRv6 Policy
+ # use the bsid of the above self.sr_policy
+ pol_steering = VppSRv6Steering(
+ self,
+ bsid=self.sr_policy.bsid,
+ prefix="a7::", mask_width=64,
+ traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6,
+ sr_policy_index=0, table_id=0,
+ sw_if_index=0)
+ pol_steering.add_vpp_config()
+
+ # log the sr steering policies
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # create packets
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a7::1234'
+ pkts = []
+
+ # create IPv6 packets without SRH
+ packet_header = self.create_packet_header_IPv6(dst_inner)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # create IPv6 packets with SRH
+ # packets with segments-left 1, active segment a7::
+ packet_header = self.create_packet_header_IPv6_SRH(
+ sidlist=['a8::', 'a7::', 'a6::'],
+ segleft=1)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # create IPv6 packets with SRH and IPv6
+ # packets with segments-left 1, active segment a7::
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a8::', 'a7::', 'a6::'],
+ segleft=1)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_T_Encaps)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SR steering
+ pol_steering.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # remove SR Policies
+ self.sr_policy.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ @unittest.skipUnless(0, "PC to fix")
+ def test_SRv6_T_Insert(self):
+ """ Test SRv6 Transit.Insert behavior (IPv6 only).
+ """
+ # send traffic to one destination interface
+ # source and destination are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure encaps IPv6 source address
+ # needs to be done before SR Policy config
+ # TODO: API?
+ self.vapi.cli("set sr encaps source addr a3::")
+
+ bsid = 'a3::9999:1'
+ # configure SRv6 Policy
+ # Note: segment list order: first -> last
+ sr_policy = VppSRv6Policy(
+ self, bsid=bsid,
+ is_encap=0,
+ sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
+ weight=1, fib_table=0,
+ segments=['a4::', 'a5::', 'a6::c7'],
+ source='a3::')
+ sr_policy.add_vpp_config()
+ self.sr_policy = sr_policy
+
+ # log the sr policies
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # steer IPv6 traffic to a7::/64 into SRv6 Policy
+ # use the bsid of the above self.sr_policy
+ pol_steering = VppSRv6Steering(
+ self,
+ bsid=self.sr_policy.bsid,
+ prefix="a7::", mask_width=64,
+ traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6,
+ sr_policy_index=0, table_id=0,
+ sw_if_index=0)
+ pol_steering.add_vpp_config()
+
+ # log the sr steering policies
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # create packets
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a7::1234'
+ pkts = []
+
+ # create IPv6 packets without SRH
+ packet_header = self.create_packet_header_IPv6(dst_inner)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # create IPv6 packets with SRH
+ # packets with segments-left 1, active segment a7::
+ packet_header = self.create_packet_header_IPv6_SRH(
+ sidlist=['a8::', 'a7::', 'a6::'],
+ segleft=1)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_T_Insert)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SR steering
+ pol_steering.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # remove SR Policies
+ self.sr_policy.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ @unittest.skipUnless(0, "PC to fix")
+ def test_SRv6_T_Encaps_IPv4(self):
+ """ Test SRv6 Transit.Encaps behavior for IPv4.
+ """
+ # send traffic to one destination interface
+ # source interface is IPv4 only
+ # destination interface is IPv6 only
+ self.setup_interfaces(ipv6=[False, True], ipv4=[True, False])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure encaps IPv6 source address
+ # needs to be done before SR Policy config
+ # TODO: API?
+ self.vapi.cli("set sr encaps source addr a3::")
+
+ bsid = 'a3::9999:1'
+ # configure SRv6 Policy
+ # Note: segment list order: first -> last
+ sr_policy = VppSRv6Policy(
+ self, bsid=bsid,
+ is_encap=1,
+ sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
+ weight=1, fib_table=0,
+ segments=['a4::', 'a5::', 'a6::c7'],
+ source='a3::')
+ sr_policy.add_vpp_config()
+ self.sr_policy = sr_policy
+
+ # log the sr policies
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # steer IPv4 traffic to 7.1.1.0/24 into SRv6 Policy
+ # use the bsid of the above self.sr_policy
+ pol_steering = VppSRv6Steering(
+ self,
+ bsid=self.sr_policy.bsid,
+ prefix="7.1.1.0", mask_width=24,
+ traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV4,
+ sr_policy_index=0, table_id=0,
+ sw_if_index=0)
+ pol_steering.add_vpp_config()
+
+ # log the sr steering policies
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # create packets
+ count = len(self.pg_packet_sizes)
+ dst_inner = '7.1.1.123'
+ pkts = []
+
+ # create IPv4 packets
+ packet_header = self.create_packet_header_IPv4(dst_inner)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_T_Encaps_IPv4)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SR steering
+ pol_steering.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # remove SR Policies
+ self.sr_policy.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ @unittest.skip("VPP crashes after running this test")
+ def test_SRv6_T_Encaps_L2(self):
+ """ Test SRv6 Transit.Encaps behavior for L2.
+ """
+ # send traffic to one destination interface
+ # source interface is IPv4 only TODO?
+ # destination interface is IPv6 only
+ self.setup_interfaces(ipv6=[False, True], ipv4=[False, False])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure encaps IPv6 source address
+ # needs to be done before SR Policy config
+ # TODO: API?
+ self.vapi.cli("set sr encaps source addr a3::")
+
+ bsid = 'a3::9999:1'
+ # configure SRv6 Policy
+ # Note: segment list order: first -> last
+ sr_policy = VppSRv6Policy(
+ self, bsid=bsid,
+ is_encap=1,
+ sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
+ weight=1, fib_table=0,
+ segments=['a4::', 'a5::', 'a6::c7'],
+ source='a3::')
+ sr_policy.add_vpp_config()
+ self.sr_policy = sr_policy
+
+ # log the sr policies
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # steer L2 traffic into SRv6 Policy
+ # use the bsid of the above self.sr_policy
+ pol_steering = VppSRv6Steering(
+ self,
+ bsid=self.sr_policy.bsid,
+ prefix="::", mask_width=0,
+ traffic_type=SRv6PolicySteeringTypes.SR_STEER_L2,
+ sr_policy_index=0, table_id=0,
+ sw_if_index=self.pg0.sw_if_index)
+ pol_steering.add_vpp_config()
+
+ # log the sr steering policies
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # create packets
+ count = len(self.pg_packet_sizes)
+ pkts = []
+
+ # create L2 packets without dot1q header
+ packet_header = self.create_packet_header_L2()
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # create L2 packets with dot1q header
+ packet_header = self.create_packet_header_L2(vlan=123)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_T_Encaps_L2)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SR steering
+ pol_steering.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # remove SR Policies
+ self.sr_policy.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End(self):
+ """ Test SRv6 End (without PSP) behavior.
+ """
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID End without PSP behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::0',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END,
+ nh_addr=0,
+ end_psp=0,
+ sw_if_index=0,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=2, SL=1, SL=0)
+ # send one packet per SL value per packet size
+ # SL=0 packet with localSID End with USP needs 2nd SRH
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234'
+ pkts = []
+
+ # packets with segments-left 2, active segment a3::
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a5::', 'a4::', 'a3::'],
+ segleft=2)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets with segments-left 1, active segment a3::
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a4::', 'a3::', 'a2::'],
+ segleft=1)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # TODO: test behavior with SL=0 packet (needs 2*SRH?)
+
+ expected_count = len(pkts)
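+        # the SRH-less packets added below are not expected to be forwarded,
+        # hence they are not counted in expected_count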
+
+ # packets without SRH (should not crash)
+ packet_header = self.create_packet_header_IPv6('a3::')
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End,
+ expected_count=expected_count)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_with_PSP(self):
+ """ Test SRv6 End with PSP behavior.
+ """
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID End with PSP behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::0',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END,
+ nh_addr=0,
+ end_psp=1,
+ sw_if_index=0,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=2, SL=1)
+ # send one packet per SL value per packet size
+ # SL=0 packet with localSID End with PSP is dropped
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234'
+ pkts = []
+
+ # packets with segments-left 2, active segment a3::
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a5::', 'a4::', 'a3::'],
+ segleft=2)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets with segments-left 1, active segment a3::
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a4::', 'a3::', 'a2::'],
+ segleft=1)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End_PSP)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_X(self):
+ """ Test SRv6 End.X (without PSP) behavior.
+ """
+ # create three interfaces (1 source, 2 destinations)
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True, True])
+
+ # configure FIB entries
+ # a4::/64 via pg1 and pg2
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index),
+ VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index)])
+ route.add_vpp_config()
+ self.logger.debug(self.vapi.cli("show ip6 fib"))
+
+ # configure SRv6 localSID End.X without PSP behavior
+ # End.X points to interface pg1
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X,
+ nh_addr=self.pg1.remote_ip6,
+ end_psp=0,
+ sw_if_index=self.pg1.sw_if_index,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=2, SL=1)
+ # send one packet per SL value per packet size
+ # SL=0 packet with localSID End with PSP is dropped
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234'
+ pkts = []
+
+ # packets with segments-left 2, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a5::', 'a4::', 'a3::c4'],
+ segleft=2)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets with segments-left 1, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a4::', 'a3::c4', 'a2::'],
+ segleft=1)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ # using same comparison function as End (no PSP)
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End)
+
+ # assert nothing was received on the other interface (pg2)
+ self.pg2.assert_nothing_captured("mis-directed packet(s)")
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_X_with_PSP(self):
+ """ Test SRv6 End.X with PSP behavior.
+ """
+ # create three interfaces (1 source, 2 destinations)
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True, True])
+
+ # configure FIB entries
+ # a4::/64 via pg1 and pg2
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(
+ self.pg1.remote_ip6,
+ self.pg1.sw_if_index),
+ VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID End with PSP behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X,
+ nh_addr=self.pg1.remote_ip6,
+ end_psp=1,
+ sw_if_index=self.pg1.sw_if_index,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=2, SL=1)
+ # send one packet per SL value per packet size
+ # SL=0 packet with localSID End with PSP is dropped
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234'
+ pkts = []
+
+        # packets with segments-left 2, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a5::', 'a4::', 'a3::c4'],
+ segleft=2)
+ # create traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+        # packets with segments-left 1, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a4::', 'a3::c4', 'a2::'],
+ segleft=1)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ # using same comparison function as End with PSP
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End_PSP)
+
+ # assert nothing was received on the other interface (pg2)
+ self.pg2.assert_nothing_captured("mis-directed packet(s)")
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_DX6(self):
+ """ Test SRv6 End.DX6 behavior.
+ """
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure SRv6 localSID End.DX6 behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX6,
+ nh_addr=self.pg1.remote_ip6,
+ end_psp=0,
+ sw_if_index=self.pg1.sw_if_index,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=0)
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234' # inner header destination address
+ pkts = []
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets without SRH, IPv6 in IPv6
+ # outer IPv6 dest addr is the localsid End.DX6
+ packet_header = self.create_packet_header_IPv6_IPv6(
+ dst_inner,
+ dst_outer='a3::c4')
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End_DX6)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_DT6(self):
+ """ Test SRv6 End.DT6 behavior.
+ """
+ # create three interfaces (1 source, 2 destinations)
+ # all interfaces are IPv6 only
+ # source interface in global FIB (0)
+ # destination interfaces in global and vrf
+ vrf_1 = 1
+ ipt = VppIpTable(self, vrf_1, is_ip6=True)
+ ipt.add_vpp_config()
+ self.setup_interfaces(ipv6=[True, True, True],
+ ipv6_table_id=[0, 0, vrf_1])
+
+ # configure FIB entries
+ # a4::/64 is reachable
+ # via pg1 in table 0 (global)
+ # and via pg2 in table vrf_1
+ route0 = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg1.remote_ip6,
+ self.pg1.sw_if_index,
+ nh_table_id=0)],
+ table_id=0)
+ route0.add_vpp_config()
+ route1 = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(self.pg2.remote_ip6,
+ self.pg2.sw_if_index,
+ nh_table_id=vrf_1)],
+ table_id=vrf_1)
+ route1.add_vpp_config()
+ self.logger.debug(self.vapi.cli("show ip6 fib"))
+
+ # configure SRv6 localSID End.DT6 behavior
+ # Note:
+ # fib_table: where the localsid is installed
+ # sw_if_index: in T-variants of localsid this is the vrf table_id
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DT6,
+ nh_addr=0,
+ end_psp=0,
+ sw_if_index=vrf_1,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=0)
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a4::1234' # inner header destination address
+ pkts = []
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv6(
+ dst_inner,
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0)
+        # add to traffic stream pg0->pg2
+ pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets without SRH, IPv6 in IPv6
+ # outer IPv6 dest addr is the localsid End.DT6
+ packet_header = self.create_packet_header_IPv6_IPv6(
+ dst_inner,
+ dst_outer='a3::c4')
+        # add to traffic stream pg0->pg2
+ pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ # using same comparison function as End.DX6
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg2,
+ self.compare_rx_tx_packet_End_DX6)
+
+        # assert nothing was received on the other interface (pg1)
+ self.pg1.assert_nothing_captured("mis-directed packet(s)")
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_DX4(self):
+ """ Test SRv6 End.DX4 behavior.
+ """
+ # send traffic to one destination interface
+ # source interface is IPv6 only
+ # destination interface is IPv4 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[False, True])
+
+ # configure SRv6 localSID End.DX4 behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX4,
+ nh_addr=self.pg1.remote_ip4,
+ end_psp=0,
+ sw_if_index=self.pg1.sw_if_index,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+ dst_inner = '4.1.1.123' # inner header destination address
+ pkts = []
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv4(
+ dst_inner,
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets without SRH, IPv4 in IPv6
+ # outer IPv6 dest addr is the localsid End.DX4
+ packet_header = self.create_packet_header_IPv6_IPv4(
+ dst_inner,
+ dst_outer='a3::c4')
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End_DX4)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_DT4(self):
+ """ Test SRv6 End.DT4 behavior.
+ """
+ # create three interfaces (1 source, 2 destinations)
+ # source interface is IPv6-only
+ # destination interfaces are IPv4 only
+ # source interface in global FIB (0)
+ # destination interfaces in global and vrf
+ vrf_1 = 1
+ ipt = VppIpTable(self, vrf_1)
+ ipt.add_vpp_config()
+ self.setup_interfaces(ipv6=[True, False, False],
+ ipv4=[False, True, True],
+ ipv6_table_id=[0, 0, 0],
+ ipv4_table_id=[0, 0, vrf_1])
+
+ # configure FIB entries
+ # 4.1.1.0/24 is reachable
+ # via pg1 in table 0 (global)
+ # and via pg2 in table vrf_1
+ route0 = VppIpRoute(self, "4.1.1.0", 24,
+ [VppRoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index,
+ nh_table_id=0)],
+ table_id=0)
+ route0.add_vpp_config()
+ route1 = VppIpRoute(self, "4.1.1.0", 24,
+ [VppRoutePath(self.pg2.remote_ip4,
+ self.pg2.sw_if_index,
+ nh_table_id=vrf_1)],
+ table_id=vrf_1)
+ route1.add_vpp_config()
+ self.logger.debug(self.vapi.cli("show ip fib"))
+
+        # configure SRv6 localSID End.DT4 behavior
+ # Note:
+ # fib_table: where the localsid is installed
+ # sw_if_index: in T-variants of localsid: vrf table_id
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DT4,
+ nh_addr=0,
+ end_psp=0,
+ sw_if_index=vrf_1,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # create IPv6 packets with SRH (SL=0)
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+ dst_inner = '4.1.1.123' # inner header destination address
+ pkts = []
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ packet_header = self.create_packet_header_IPv6_SRH_IPv4(
+ dst_inner,
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0)
+        # add to traffic stream pg0->pg2
+ pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header,
+ self.pg_packet_sizes, count))
+
+        # packets without SRH, IPv4 in IPv6
+        # outer IPv6 dest addr is the localsid End.DT4
+ packet_header = self.create_packet_header_IPv6_IPv4(
+ dst_inner,
+ dst_outer='a3::c4')
+        # add to traffic stream pg0->pg2
+ pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ # using same comparison function as End.DX4
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg2,
+ self.compare_rx_tx_packet_End_DX4)
+
+        # assert nothing was received on the other interface (pg1)
+ self.pg1.assert_nothing_captured("mis-directed packet(s)")
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def test_SRv6_End_DX2(self):
+ """ Test SRv6 End.DX2 behavior.
+ """
+ # send traffic to one destination interface
+ # source interface is IPv6 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[False, False])
+
+ # configure SRv6 localSID End.DX2 behavior
+ localsid = VppSRv6LocalSID(
+ self, localsid='A3::C4',
+ behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX2,
+ nh_addr=0,
+ end_psp=0,
+ sw_if_index=self.pg1.sw_if_index,
+ vlan_index=0,
+ fib_table=0)
+ localsid.add_vpp_config()
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+ pkts = []
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ # L2 has no dot1q header
+ packet_header = self.create_packet_header_IPv6_SRH_L2(
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0,
+ vlan=0)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets with SRH, segments-left 0, active segment a3::c4
+ # L2 has dot1q header
+ packet_header = self.create_packet_header_IPv6_SRH_L2(
+ sidlist=['a3::c4', 'a2::', 'a1::'],
+ segleft=0,
+ vlan=123)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets without SRH, L2 in IPv6
+ # outer IPv6 dest addr is the localsid End.DX2
+ # L2 has no dot1q header
+ packet_header = self.create_packet_header_IPv6_L2(
+ dst_outer='a3::c4',
+ vlan=0)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # packets without SRH, L2 in IPv6
+ # outer IPv6 dest addr is the localsid End.DX2
+ # L2 has dot1q header
+ packet_header = self.create_packet_header_IPv6_L2(
+ dst_outer='a3::c4',
+ vlan=123)
+ # add to traffic stream pg0->pg1
+ pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
+ self.compare_rx_tx_packet_End_DX2)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ localsid.remove_vpp_config()
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ @unittest.skipUnless(0, "PC to fix")
+ def test_SRv6_T_Insert_Classifier(self):
+ """ Test SRv6 Transit.Insert behavior (IPv6 only).
+ steer packets using the classifier
+ """
+ # send traffic to one destination interface
+ # source and destination are IPv6 only
+ self.setup_interfaces(ipv6=[False, False, False, True, True])
+
+ # configure FIB entries
+ route = VppIpRoute(self, "a4::", 64,
+ [VppRoutePath(
+ self.pg4.remote_ip6,
+ self.pg4.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure encaps IPv6 source address
+ # needs to be done before SR Policy config
+ # TODO: API?
+ self.vapi.cli("set sr encaps source addr a3::")
+
+ bsid = 'a3::9999:1'
+ # configure SRv6 Policy
+ # Note: segment list order: first -> last
+ sr_policy = VppSRv6Policy(
+ self, bsid=bsid,
+ is_encap=0,
+ sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT,
+ weight=1, fib_table=0,
+ segments=['a4::', 'a5::', 'a6::c7'],
+ source='a3::')
+ sr_policy.add_vpp_config()
+ self.sr_policy = sr_policy
+
+ # log the sr policies
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # add classify table
+ # mask on dst ip address prefix a7::/8
+ mask = '{!s:0<16}'.format('ff')
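+ # the format spec left-aligns 'ff' and pads with '0' to 16 hex chars,
+ # i.e. mask == 'ff00000000000000' (8 bytes, only the first byte set)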
+ r = self.vapi.classify_add_del_table(
+ 1,
+ binascii.unhexlify(mask),
+ match_n_vectors=(len(mask) - 1) // 32 + 1,
+ current_data_flag=1,
+ skip_n_vectors=2) # data offset
+ self.assertIsNotNone(r, 'No response msg for add_del_table')
+ table_index = r.new_table_index
+
+ # add the source routing node as an ip6 inacl next node
+ r = self.vapi.add_node_next('ip6-inacl',
+ 'sr-pl-rewrite-insert')
+ inacl_next_node_index = r.node_index
+
+ match = '{!s:0<16}'.format('a7')
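+ # match expands to 'a700000000000000'; under the mask above only the
+ # first byte (0xa7) is compared, intended to match the a7::/8 prefix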
+ r = self.vapi.classify_add_del_session(
+ 1,
+ table_index,
+ binascii.unhexlify(match),
+ hit_next_index=inacl_next_node_index,
+ action=3,
+ metadata=0) # sr policy index
+ self.assertIsNotNone(r, 'No response msg for add_del_session')
+
+ # log the classify table used in the steering policy
+ self.logger.info(self.vapi.cli("show classify table"))
+
+ r = self.vapi.input_acl_set_interface(
+ is_add=1,
+ sw_if_index=self.pg3.sw_if_index,
+ ip6_table_index=table_index)
+ self.assertIsNotNone(r,
+ 'No response msg for input_acl_set_interface')
+
+ # log the ip6 inacl
+ self.logger.info(self.vapi.cli("show inacl type ip6"))
+
+ # create packets
+ count = len(self.pg_packet_sizes)
+ dst_inner = 'a7::1234'
+ pkts = []
+
+ # create IPv6 packets without SRH
+ packet_header = self.create_packet_header_IPv6(dst_inner)
+ # create traffic stream pg3->pg4
+ pkts.extend(self.create_stream(self.pg3, self.pg4, packet_header,
+ self.pg_packet_sizes, count))
+
+ # create IPv6 packets with SRH
+ # packets with segments-left 1, active segment a7::
+ packet_header = self.create_packet_header_IPv6_SRH(
+ sidlist=['a8::', 'a7::', 'a6::'],
+ segleft=1)
+ # create traffic stream pg3->pg4
+ pkts.extend(self.create_stream(self.pg3, self.pg4, packet_header,
+ self.pg_packet_sizes, count))
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg3, pkts, self.pg4,
+ self.compare_rx_tx_packet_T_Insert)
+
+ # remove the ip6 input ACL feature from the interface
+ r = self.vapi.input_acl_set_interface(
+ is_add=0,
+ sw_if_index=self.pg3.sw_if_index,
+ ip6_table_index=table_index)
+ self.assertIsNotNone(r,
+ 'No response msg for input_acl_set_interface')
+
+ # log the ip6 inacl after cleaning
+ self.logger.info(self.vapi.cli("show inacl type ip6"))
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove classifier SR steering
+ # classifier_steering.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr steering policies"))
+
+ # remove SR Policies
+ self.sr_policy.remove_vpp_config()
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ # remove classify session and table
+ r = self.vapi.classify_add_del_session(
+ 0,
+ table_index,
+ binascii.unhexlify(match))
+ self.assertIsNotNone(r, 'No response msg for add_del_session')
+
+ r = self.vapi.classify_add_del_table(
+ 0,
+ binascii.unhexlify(mask),
+ table_index=table_index)
+ self.assertIsNotNone(r, 'No response msg for add_del_table')
+
+ self.logger.info(self.vapi.cli("show classify table"))
+
+ # remove FIB entries
+ # done by tearDown
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_T_Encaps(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing T.Encaps
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # T.Encaps updates the headers as follows:
+ # SR Policy seglist (S3, S2, S1)
+ # SR Policy source C
+ # IPv6:
+ # in: IPv6(A, B2)
+ # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv6(A, B2)
+ # IPv6 + SRH:
+ # in: IPv6(A, B2)SRH(B3, B2, B1; SL=1)
+ # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv6(A, B2)SRH(B3, B2, B1; SL=1)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+
+ # expected segment-list
+ seglist = self.sr_policy.segments
+ # reverse list to get order as in SRH
+ tx_seglist = seglist[::-1]
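+ # e.g. if the policy segments are ['a4::', 'a5::', 'a6::c7'], the SRH
+ # carries them as ['a6::c7', 'a5::', 'a4::'] (last segment first)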
+
+ # get source address of SR Policy
+ sr_policy_source = self.sr_policy.source
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, sr_policy_source)
+ # received ip.dst should be equal to expected sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+ # rx'ed seglist should be equal to expected seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+ # segleft should be equal to size expected seglist-1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the hop-limit field
+ # -> update tx'ed hlim to the expected hlim
+ tx_ip.hlim = tx_ip.hlim - 1
+
+ self.assertEqual(rx_srh.payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_T_Encaps_IPv4(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing T.Encaps for IPv4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # T.Encaps for IPv4 updates the headers as follows:
+ # SR Policy seglist (S3, S2, S1)
+ # SR Policy source C
+ # IPv4:
+ # in: IPv4(A, B2)
+ # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv4(A, B2)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ip = tx_pkt.getlayer(IP)
+
+ # expected segment-list
+ seglist = self.sr_policy.segments
+ # reverse list to get order as in SRH
+ tx_seglist = seglist[::-1]
+
+ # get source address of SR Policy
+ sr_policy_source = self.sr_policy.source
+
+ # checks common to cases tx with and without SRH
+ # rx'ed packet should have SRH and IPv4 header
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ self.assertTrue(rx_ip.payload.haslayer(IP))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, sr_policy_source)
+ # received ip.dst should be equal to sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+ # rx'ed seglist should be equal to seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+ # segleft should be equal to size seglist-1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip.ttl = tx_ip.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip.chksum = None
+ # rebuild the pkt from its raw bytes to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip = IP(scapy.compat.raw(tx_ip))
+
+ self.assertEqual(rx_srh.payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_T_Encaps_L2(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing T.Encaps for L2
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # T.Encaps for L2 updates the headers as follows:
+ # SR Policy seglist (S3, S2, S1)
+ # SR Policy source C
+ # L2:
+ # in: L2
+ # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)L2
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ether = tx_pkt.getlayer(Ether)
+
+ # expected segment-list
+ seglist = self.sr_policy.segments
+ # reverse list to get order as in SRH
+ tx_seglist = seglist[::-1]
+
+ # get source address of SR Policy
+ sr_policy_source = self.sr_policy.source
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, sr_policy_source)
+ # received ip.dst should be equal to sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+ # rx'ed seglist should be equal to seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+ # segleft should be equal to size seglist-1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+ # nh should be 143, the next-header value used for an Ethernet/L2 payload
+ self.assertEqual(rx_srh.nh, 143)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ self.assertEqual(Ether(scapy.compat.raw(rx_srh.payload)), tx_ether)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_T_Insert(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing T.Insert
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # T.Insert updates the headers as follows:
+ # IPv6:
+ # in: IPv6(A, B2)
+ # out: IPv6(A, S1)SRH(B2, S3, S2, S1; SL=3)
+ # IPv6 + SRH:
+ # in: IPv6(A, B2)SRH(B3, B2, B1; SL=1)
+ # out: IPv6(A, S1)SRH(B2, S3, S2, S1; SL=3)SRH(B3, B2, B1; SL=1)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+ rx_ip2 = None
+ rx_srh2 = None
+ rx_ip3 = None
+ rx_udp = rx_pkt[UDP]
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_srh = None
+ tx_ip2 = None
+ # some packets have been tx'ed with an SRH, some without it
+ # get SRH if tx'ed packet has it
+ if tx_pkt.haslayer(IPv6ExtHdrSegmentRouting):
+ tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+ tx_udp = tx_pkt[UDP]
+
+ # expected segment-list (make copy of SR Policy segment list)
+ seglist = self.sr_policy.segments[:]
+ # expected seglist has initial dest addr as last segment
+ seglist.append(tx_ip.dst)
+ # reverse list to get order as in SRH
+ tx_seglist = seglist[::-1]
+
+ # get source address of SR Policy
+ sr_policy_source = self.sr_policy.source
+
+ # checks common to cases tx with and without SRH
+ # rx'ed packet should have SRH and only one IPv6 header
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ self.assertFalse(rx_ip.payload.haslayer(IPv6))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # rx'ed ip.src should be equal to tx'ed ip.src
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ # rx'ed ip.dst should be equal to sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+
+ # rx'ed seglist should be equal to expected seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+ # segleft should be equal to size(expected seglist)-1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+
+ if tx_srh: # packet was tx'ed with SRH
+ # packet should have 2nd SRH
+ self.assertTrue(rx_srh.payload.haslayer(IPv6ExtHdrSegmentRouting))
+ # get 2nd SRH
+ rx_srh2 = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting, 2)
+
+ # rx'ed srh2.addresses should be equal to tx'ed srh.addresses
+ self.assertEqual(rx_srh2.addresses, tx_srh.addresses)
+ # rx'ed srh2.segleft should be equal to tx'ed srh.segleft
+ self.assertEqual(rx_srh2.segleft, tx_srh.segleft)
+ # rx'ed srh2.lastentry should be equal to tx'ed srh.lastentry
+ self.assertEqual(rx_srh2.lastentry, tx_srh.lastentry)
+
+ else: # packet was tx'ed without SRH
+ # rx packet should have no other SRH
+ self.assertFalse(rx_srh.payload.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # UDP layer should be unchanged
+ self.assertEqual(rx_udp, tx_udp)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End (without PSP)
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # End (no PSP) updates the headers as follows:
+ # IPv6 + SRH:
+ # in: IPv6(A, S1)SRH(S3, S2, S1; SL=2)
+ # out: IPv6(A, S2)SRH(S3, S2, S1; SL=1)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+ rx_ip2 = None
+ rx_udp = rx_pkt[UDP]
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # we know the packet has been tx'ed
+ # with an inner IPv6 header and an SRH
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+ tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ tx_udp = tx_pkt[UDP]
+
+ # common checks, regardless of tx segleft value
+ # rx'ed packet should have 2nd IPv6 header
+ self.assertTrue(rx_ip.payload.haslayer(IPv6))
+ # get second (inner) IPv6 header
+ rx_ip2 = rx_pkt.getlayer(IPv6, 2)
+
+ if tx_ip.segleft > 0:
+ # SRH should NOT have been popped:
+ # End SID without PSP does not pop SRH if segleft>0
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # received ip.src should be equal to expected ip.src
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ # sidlist should be unchanged
+ self.assertEqual(rx_srh.addresses, tx_srh.addresses)
+ # segleft should have been decremented
+ self.assertEqual(rx_srh.segleft, tx_srh.segleft-1)
+ # received ip.dst should be equal to sidlist[segleft]
+ self.assertEqual(rx_ip.dst, rx_srh.addresses[rx_srh.segleft])
+ # lastentry should be unchanged
+ self.assertEqual(rx_srh.lastentry, tx_srh.lastentry)
+ # inner IPv6 packet (ip2) should be unchanged
+ self.assertEqual(rx_ip2.src, tx_ip2.src)
+ self.assertEqual(rx_ip2.dst, tx_ip2.dst)
+ # else: # tx_ip.segleft == 0
+ # TODO: Does this work with 2 SRHs in ingress packet?
+
+ # UDP layer should be unchanged
+ self.assertEqual(rx_udp, tx_udp)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_PSP(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End with PSP
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # End (PSP) updates the headers as follows:
+ # IPv6 + SRH (SL>1):
+ # in: IPv6(A, S1)SRH(S3, S2, S1; SL=2)
+ # out: IPv6(A, S2)SRH(S3, S2, S1; SL=1)
+ # IPv6 + SRH (SL=1):
+ # in: IPv6(A, S2)SRH(S3, S2, S1; SL=1)
+ # out: IPv6(A, S3)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+ rx_ip2 = None
+ rx_udp = rx_pkt[UDP]
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # we know the packet has been tx'ed
+ # with an inner IPv6 header and an SRH
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+ tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ tx_udp = tx_pkt[UDP]
+
+ # common checks, regardless of tx segleft value
+ self.assertTrue(rx_ip.payload.haslayer(IPv6))
+ rx_ip2 = rx_pkt.getlayer(IPv6, 2)
+ # inner IPv6 packet (ip2) should be unchanged
+ self.assertEqual(rx_ip2.src, tx_ip2.src)
+ self.assertEqual(rx_ip2.dst, tx_ip2.dst)
+
+ if tx_ip.segleft > 1:
+ # SRH should NOT have been popped:
+ # End SID with PSP does not pop SRH if segleft>1
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+
+ # received ip.src should be equal to expected ip.src
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ # sidlist should be unchanged
+ self.assertEqual(rx_srh.addresses, tx_srh.addresses)
+ # segleft should have been decremented
+ self.assertEqual(rx_srh.segleft, tx_srh.segleft-1)
+ # received ip.dst should be equal to sidlist[segleft]
+ self.assertEqual(rx_ip.dst, rx_srh.addresses[rx_srh.segleft])
+ # lastentry should be unchanged
+ self.assertEqual(rx_srh.lastentry, tx_srh.lastentry)
+
+ else: # tx_ip.segleft <= 1
+ # SRH should have been popped:
+ # End SID with PSP and segleft=1 pops SRH
+ # the two IPv6 headers are still present
+ # outer IPv6 header has DA == last segment of popped SRH
+ # SRH should not be present
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # outer IPv6 header ip.src should be equal to tx'ed ip.src
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ # outer IPv6 header ip.dst should be = to tx'ed sidlist[segleft-1]
+ self.assertEqual(rx_ip.dst, tx_srh.addresses[tx_srh.segleft-1])
+
+ # UDP layer should be unchanged
+ self.assertEqual(rx_udp, tx_udp)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_DX6(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.DX6
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # End.DX6 updates the headers as follows:
+ # IPv6 + SRH (SL=0):
+ # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)IPv6(B, D)
+ # out: IPv6(B, D)
+ # IPv6:
+ # in: IPv6(A, S3)IPv6(B, D)
+ # out: IPv6(B, D)
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the hlim field
+ # -> adjust tx'ed hlim to expected hlim
+ tx_ip2.hlim = tx_ip2.hlim - 1
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_DX4(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.DX4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # End.DX4 updates the headers as follows:
+ # IPv6 + SRH (SL=0):
+ # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)IPv4(B, D)
+ # out: IPv4(B, D)
+ # IPv6:
+ # in: IPv6(A, S3)IPv4(B, D)
+ # out: IPv4(B, D)
+
+ # get IPv4 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IP)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IP)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip2.ttl = tx_ip2.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip2.chksum = None
+ # rebuild the pkt from its raw bytes to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip2 = IP(scapy.compat.raw(tx_ip2))
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_DX2(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.DX2
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+ # End.DX2 updates the headers as follows:
+ # IPv6 + SRH (SL=0):
+ # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)L2
+ # out: L2
+ # IPv6:
+ # in: IPv6(A, S3)L2
+ # out: L2
+
+ # get Ethernet header of rx'ed packet
+ rx_eth = rx_pkt.getlayer(Ether)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # we can't just get the 2nd Ether layer
+ # get the Raw content and dissect it as Ether
+ tx_eth1 = Ether(scapy.compat.raw(tx_pkt[Raw]))
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_eth pkt should be equal to tx_eth1
+ self.assertEqual(rx_eth, tx_eth1)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def create_stream(self, src_if, dst_if, packet_header, packet_sizes,
+ count):
+ """Create SRv6 input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for
+ :param VppInterface dst_if: destination interface of packet stream
+ :param packet_header: Layer3 scapy packet headers,
+ L2 is added when not provided,
+ Raw(payload) with packet_info is added
+ :param list packet_sizes: list of packet sizes, applied sequentially
+ to the packets in the stream
+ :param int count: number of packets in packet stream
+ :return: list of packets
+ """
+ self.logger.info("Creating packets")
+ pkts = []
+ for i in range(0, count-1):
+ payload_info = self.create_packet_info(src_if, dst_if)
+ self.logger.debug(
+ "Creating packet with index %d" % (payload_info.index))
+ payload = self.info_to_payload(payload_info)
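+ # the generated payload encodes the packet index and src/dst sw_if_index;
+ # verify_captured_pkts uses it later to match rx packets against tx packets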
+ # add L2 header if not yet provided in packet_header
+ if packet_header.getlayer(0).name == 'Ethernet':
+ p = (packet_header /
+ Raw(payload))
+ else:
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ packet_header /
+ Raw(payload))
+ size = packet_sizes[i % len(packet_sizes)]
+ self.logger.debug("Packet size %d" % (size))
+ self.extend_packet(p, size)
+ # we need to store the packet with the automatic fields computed
+ # rebuild the dumped packet from its raw bytes
+ # to force computing these fields
+ # probably other ways are possible
+ p = Ether(scapy.compat.raw(p))
+ payload_info.data = p.copy()
+ self.logger.debug(ppp("Created packet:", p))
+ pkts.append(p)
+ self.logger.info("Done creating packets")
+ return pkts
+
+ def send_and_verify_pkts(self, input, pkts, output, compare_func,
+ expected_count=None):
+ """Send packets and verify received packets using compare_func
+
+ :param input: ingress interface of DUT
+ :param pkts: list of packets to transmit
+ :param output: egress interface of DUT
+ :param compare_func: function to compare in and out packets
+ :param expected_count: expected number of captured packets (if
+ different than len(pkts))
+ """
+ # add traffic stream to input interface
+ input.add_stream(pkts)
+
+ # enable capture on all interfaces
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # start traffic
+ self.logger.info("Starting traffic")
+ self.pg_start()
+
+ # get output capture
+ self.logger.info("Getting packet capture")
+ capture = output.get_capture(expected_count=expected_count)
+
+ # assert nothing was captured on input interface
+ input.assert_nothing_captured()
+
+ # verify captured packets
+ self.verify_captured_pkts(output, capture, compare_func)
+
+ def create_packet_header_IPv6(self, dst):
+ """Create packet header: IPv6 header, UDP header
+
+ :param dst: IPv6 destination address
+
+ IPv6 source address is 1234::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=dst) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH(self, sidlist, segleft):
+ """Create packet header: IPv6 header with SRH, UDP header
+
+ :param list sidlist: segment list
+ :param int segleft: segments-left field value
+
+ IPv6 destination address is set to sidlist[segleft]
+ IPv6 source address is 1234::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist, segleft=segleft) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv6(self, dst, sidlist, segleft):
+ """Create packet header: IPv6 encapsulated in SRv6:
+ IPv6 header with SRH, IPv6 header, UDP header
+
+ :param ipv6address dst: inner IPv6 destination address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 destination address is set to sidlist[segleft]
+ IPv6 source addresses are 1234::1 and 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=41) /
+ IPv6(src='4321::1', dst=dst) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_IPv6(self, dst_inner, dst_outer):
+ """Create packet header: IPv6 encapsulated in IPv6:
+ IPv6 header, IPv6 header, UDP header
+
+ :param ipv6address dst_inner: inner IPv6 destination address
+ :param ipv6address dst_outer: outer IPv6 destination address
+
+ IPv6 source addresses are 1234::1 and 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=dst_outer) /
+ IPv6(src='4321::1', dst=dst_inner) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH_SRH_IPv6(self, dst, sidlist1, segleft1,
+ sidlist2, segleft2):
+ """Create packet header: IPv6 encapsulated in SRv6 with 2 SRH:
+ IPv6 header with SRH, 2nd SRH, IPv6 header, UDP header
+
+ :param ipv6address dst: inner IPv6 destination address
+ :param list sidlist1: segment list of outer IPv6 SRH
+ :param int segleft1: segments-left field of outer IPv6 SRH
+ :param list sidlist2: segment list of inner IPv6 SRH
+ :param int segleft2: segments-left field of inner IPv6 SRH
+
+ Outer IPv6 destination address is set to sidlist1[segleft1]
+ IPv6 source addresses are 1234::1 and 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=sidlist1[segleft1]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist1,
+ segleft=segleft1, nh=43) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist2,
+ segleft=segleft2, nh=41) /
+ IPv6(src='4321::1', dst=dst) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv4(self, dst):
+ """Create packet header: IPv4 header, UDP header
+
+ :param dst: IPv4 destination address
+
+ IPv4 source address is 123.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IP(src='123.1.1.1', dst=dst) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_IPv4(self, dst_inner, dst_outer):
+ """Create packet header: IPv4 encapsulated in IPv6:
+ IPv6 header, IPv4 header, UDP header
+
+ :param ipv4address dst_inner: inner IPv4 destination address
+ :param ipv6address dst_outer: outer IPv6 destination address
+
+ IPv6 source address is 1234::1
+ IPv4 source address is 123.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=dst_outer) /
+ IP(src='123.1.1.1', dst=dst_inner) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv4(self, dst, sidlist, segleft):
+ """Create packet header: IPv4 encapsulated in SRv6:
+ IPv6 header with SRH, IPv4 header, UDP header
+
+ :param ipv4address dst: inner IPv4 destination address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 destination address is set to sidlist[segleft]
+ IPv6 source address is 1234::1
+ IPv4 source address is 123.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=4) /
+ IP(src='123.1.1.1', dst=dst) /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_L2(self, vlan=0):
+ """Create packet header: L2 header
+
+ :param vlan: if vlan!=0 then add 802.1q header
+ """
+ # Note: the dst addr ('00:55:44:33:22:11') is used in
+ # the compare function compare_rx_tx_packet_T_Encaps_L2
+ # to detect presence of L2 in SRH payload
+ p = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
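+ # 0x8137 (IPX) is a non-IP ethertype, presumably chosen so that neither
+ # VPP nor scapy tries to interpret the inner frame as an IP packet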
+ if vlan:
+ # add 802.1q layer
+ p /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ p.type = etype
+ return p
+
+ def create_packet_header_IPv6_SRH_L2(self, sidlist, segleft, vlan=0):
+ """Create packet header: L2 encapsulated in SRv6:
+ IPv6 header with SRH, L2
+
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+ :param vlan: L2 vlan; if vlan!=0 then add 802.1q header
+
+ Outer IPv6 destination address is set to sidlist[segleft]
+ IPv6 source address is 1234::1
+ """
+ eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ eth /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ eth.type = etype
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=143) /
+ eth)
+ return p
+
+ def create_packet_header_IPv6_L2(self, dst_outer, vlan=0):
+ """Create packet header: L2 encapsulated in IPv6:
+ IPv6 header, L2
+
+ :param ipv6address dst_outer: outer IPv6 destination address
+ :param vlan: L2 vlan; if vlan!=0 then add 802.1q header
+ """
+ eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ eth /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ eth.type = etype
+
+ p = (IPv6(src='1234::1', dst=dst_outer, nh=143) / eth)
+ return p
+
+ def get_payload_info(self, packet):
+ """ Extract the payload_info from the packet
+ """
+ # in most cases, payload_info is in packet[Raw]
+ # but packet[Raw] gives the complete payload
+ # (incl L2 header) for the T.Encaps L2 case
+ try:
+ payload_info = self.payload_to_info(packet[Raw])
+
+ except:
+ # remove the L2 header from packet[Raw]:
+ # take packet[Raw], convert it to an Ether layer
+ # and then extract Raw from it
+ payload_info = self.payload_to_info(
+ Ether(scapy.compat.raw(packet[Raw]))[Raw])
+
+ return payload_info
+
+ def verify_captured_pkts(self, dst_if, capture, compare_func):
+ """
+ Verify captured packet stream for specified interface.
+ Compare ingress with egress packets using the specified compare fn
+
+ :param dst_if: egress interface of DUT
+ :param capture: captured packets
+ :param compare_func: function to compare in and out packet
+ """
+ self.logger.info("Verifying capture on interface %s using function %s"
+ % (dst_if.name, compare_func.__name__))
+
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+
+ for packet in capture:
+ try:
+ # extract payload_info from packet's payload
+ payload_info = self.get_payload_info(packet)
+ packet_index = payload_info.index
+
+ self.logger.debug("Verifying packet with index %d"
+ % (packet_index))
+ # packet should have arrived on the expected interface
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on interface %s: src=%u (idx=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+
+ # search for payload_info with same src and dst if_index
+ # this will give us the transmitted packet
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ # next_info should not be None
+ self.assertTrue(next_info is not None)
+ # index of tx and rx packets should be equal
+ self.assertEqual(packet_index, next_info.index)
+ # data field of next_info contains the tx packet
+ txed_packet = next_info.data
+
+ self.logger.debug(ppp("Transmitted packet:",
+ txed_packet)) # ppp=Pretty Print Packet
+
+ self.logger.debug(ppp("Received packet:", packet))
+
+ # compare rcvd packet with expected packet using compare_func
+ compare_func(txed_packet, packet)
+
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # FIXME: there is no need to check manually that all the packets
+ # arrived (already done so by get_capture); checking here
+ # prevents testing packets that are expected to be dropped, so
+ # commenting this out for now
+
+ # have all expected packets arrived?
+ # for i in self.pg_interfaces:
+ # remaining_packet = self.get_next_packet_info_for_interface2(
+ # i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ # self.assertTrue(remaining_packet is None,
+ # "Interface %s: Packet expected from interface %s "
+ # "didn't arrive" % (dst_if.name, i.name))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_srv6_ad.py b/test/test_srv6_ad.py
new file mode 100644
index 00000000000..2627df32aa9
--- /dev/null
+++ b/test/test_srv6_ad.py
@@ -0,0 +1,809 @@
+#!/usr/bin/env python3
+
+import unittest
+import binascii
+from socket import AF_INET6
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
+ SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+
+
+class TestSRv6(VppTestCase):
+ """ SRv6 Dynamic Proxy plugin Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSRv6, cls).tearDownClass()
+
+ def setUp(self):
+ """ Perform test setup before each test case.
+ """
+ super(TestSRv6, self).setUp()
+
+ # packet sizes, inclusive L2 overhead
+ self.pg_packet_sizes = [64, 512, 1518, 9018]
+
+ # reset packet_infos
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """ Clean up test setup after each test case.
+ """
+ self.teardown_interfaces()
+
+ super(TestSRv6, self).tearDown()
+
+ def configure_interface(self,
+ interface,
+ ipv6=False, ipv4=False,
+ ipv6_table_id=0, ipv4_table_id=0):
+ """ Configure interface.
+ :param ipv6: configure IPv6 on interface
+ :param ipv4: configure IPv4 on interface
+ :param ipv6_table_id: FIB table_id for IPv6
+ :param ipv4_table_id: FIB table_id for IPv4
+ """
+ self.logger.debug("Configuring interface %s" % (interface.name))
+ if ipv6:
+ self.logger.debug("Configuring IPv6")
+ interface.set_table_ip6(ipv6_table_id)
+ interface.config_ip6()
+ interface.resolve_ndp(timeout=5)
+ if ipv4:
+ self.logger.debug("Configuring IPv4")
+ interface.set_table_ip4(ipv4_table_id)
+ interface.config_ip4()
+ interface.resolve_arp()
+ interface.admin_up()
+
+ def setup_interfaces(self, ipv6=[], ipv4=[],
+ ipv6_table_id=[], ipv4_table_id=[]):
+ """ Create and configure interfaces.
+
+ :param ipv6: list of interface IPv6 capabilities
+ :param ipv4: list of interface IPv4 capabilities
+ :param ipv6_table_id: list of intf IPv6 FIB table_ids
+ :param ipv4_table_id: list of intf IPv4 FIB table_ids
+ :returns: List of created interfaces.
+ """
+ # how many interfaces?
+ if len(ipv6):
+ count = len(ipv6)
+ else:
+ count = len(ipv4)
+ self.logger.debug("Creating and configuring %d interfaces" % (count))
+
+ # fill up ipv6 and ipv4 lists if needed
+ # not enabled (False) is the default
+ if len(ipv6) < count:
+ ipv6 += (count - len(ipv6)) * [False]
+ if len(ipv4) < count:
+ ipv4 += (count - len(ipv4)) * [False]
+
+ # fill up table_id lists if needed
+ # table_id 0 (global) is the default
+ if len(ipv6_table_id) < count:
+ ipv6_table_id += (count - len(ipv6_table_id)) * [0]
+ if len(ipv4_table_id) < count:
+ ipv4_table_id += (count - len(ipv4_table_id)) * [0]
+
+ # create 'count' pg interfaces
+ self.create_pg_interfaces(range(count))
+
+ # setup all interfaces
+ for i in range(count):
+ intf = self.pg_interfaces[i]
+ self.configure_interface(intf,
+ ipv6[i], ipv4[i],
+ ipv6_table_id[i], ipv4_table_id[i])
+
+ if any(ipv6):
+ self.logger.debug(self.vapi.cli("show ip6 neighbors"))
+ if any(ipv4):
+ self.logger.debug(self.vapi.cli("show ip4 neighbors"))
+ self.logger.debug(self.vapi.cli("show interface"))
+ self.logger.debug(self.vapi.cli("show hardware"))
+
+ return self.pg_interfaces
+
+ def teardown_interfaces(self):
+ """ Unconfigure and bring down interface.
+ """
+ self.logger.debug("Tearing down interfaces")
+ # tear down all interfaces
+ # AFAIK they cannot be deleted
+ for i in self.pg_interfaces:
+ self.logger.debug("Tear down interface %s" % (i.name))
+ i.admin_down()
+ i.unconfig()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+
+ def test_SRv6_End_AD_IPv6(self):
+ """ Test SRv6 End.AD behavior with IPv6 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a6', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad" + \
+ " nh " + self.pg1.remote_ip6 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv6 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv6(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
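+ # with sid_list reversed to ['a3::', 'a2::a6', 'a1::'] and segleft=1,
+ # the outer IPv6 destination is 'a2::a6', the End.AD SID under test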
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv6_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv6 header for returning packets
+ packet_header2 = self.create_packet_header_IPv6()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv6_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv6_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv6
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the hlim field
+ # -> adjust tx'ed hlim to expected hlim
+ tx_ip2.hlim = tx_ip2.hlim - 1
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv6_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the hop-limit field
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # -> update tx'ed hlim to the expected hlim
+ tx_ip.hlim -= 1
+ # -> check payload
+ self.assertEqual(rx_srh.payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def test_SRv6_End_AD_IPv4(self):
+ """ Test SRv6 End.AD behavior with IPv4 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a4', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # pg0 (source) is IPv6 only, pg1 (destination) is IPv4 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[False, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad" + \
+ " nh " + self.pg1.remote_ip4 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv4 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv4(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv4_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv4 header for returning packets
+ packet_header2 = self.create_packet_header_IPv4()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv4_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv4_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get IPv4 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IP)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IP)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip2.ttl = tx_ip2.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip2.chksum = None
+ # rebuild the pkt from its raw bytes to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip2 = IP(scapy.compat.raw(tx_ip2))
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv4_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the ttl field and ip checksum
+ tx_ip = tx_pkt.getlayer(IP)
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip.ttl = tx_ip.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip.chksum = None
+ # -> rebuild the pkt from its raw bytes to force computing these fields
+ # probably other ways to accomplish this are possible
+ self.assertEqual(rx_srh.payload, IP(scapy.compat.raw(tx_ip)))
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def test_SRv6_End_AD_L2(self):
+ """ Test SRv6 End.AD behavior with L2 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a4', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, False])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad" + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare L2 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_L2(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1,
+ vlan=0)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_L2_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare L2 header for returning packets
+ packet_header2 = self.create_packet_header_L2()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_L2_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_L2_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with L2
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get Ethernet header of rx'ed packet
+ rx_eth = rx_pkt.getlayer(Ether)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # we can't just get the 2nd Ether layer
+ # get the Raw content and dissect it as Ether
+ tx_eth1 = Ether(scapy.compat.raw(tx_pkt[Raw]))
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_eth pkt should be equal to tx_eth1
+ self.assertEqual(rx_eth, tx_eth1)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_L2_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ tx_ether = tx_pkt.getlayer(Ether)
+ self.assertEqual(Ether(scapy.compat.raw(rx_srh.payload)), tx_ether)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def create_stream(self, src_if, dst_if, packet_header, packet_sizes,
+ count):
+ """Create SRv6 input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for
+ :param VppInterface dst_if: destination interface of packet stream
+ :param packet_header: Layer3 scapy packet headers,
+ L2 is added when not provided,
+ Raw(payload) with packet_info is added
+ :param list packet_sizes: list of packet sizes, applied sequentially
+ to the packets in the stream
+ :param int count: number of packets in packet stream
+ :return: list of packets
+ """
+ self.logger.info("Creating packets")
+ pkts = []
+ for i in range(0, count - 1):
+ payload_info = self.create_packet_info(src_if, dst_if)
+ self.logger.debug(
+ "Creating packet with index %d" % (payload_info.index))
+ payload = self.info_to_payload(payload_info)
+ # add L2 header if not yet provided in packet_header
+ if packet_header.getlayer(0).name == 'Ethernet':
+ p = packet_header / Raw(payload)
+ else:
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) / \
+ packet_header / Raw(payload)
+ size = packet_sizes[i % len(packet_sizes)]
+ self.logger.debug("Packet size %d" % (size))
+ self.extend_packet(p, size)
+ # we need to store the packet with the automatic fields computed
+ # rebuild the dumped packet from its raw bytes
+ # to force computing these fields
+ # probably other ways are possible
+ p = Ether(scapy.compat.raw(p))
+ payload_info.data = p.copy()
+ self.logger.debug(ppp("Created packet:", p))
+ pkts.append(p)
+ self.logger.info("Done creating packets")
+ return pkts
+
+ def send_and_verify_pkts(self, input, pkts, output, compare_func):
+ """Send packets and verify received packets using compare_func
+
+ :param input: ingress interface of DUT
+ :param pkts: list of packets to transmit
+ :param output: egress interface of DUT
+ :param compare_func: function to compare in and out packets
+ """
+ # add traffic stream to input interface
+ input.add_stream(pkts)
+
+ # enable capture on all interfaces
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # start traffic
+ self.logger.info("Starting traffic")
+ self.pg_start()
+
+ # get output capture
+ self.logger.info("Getting packet capture")
+ capture = output.get_capture()
+
+ # assert nothing was captured on input interface
+ # input.assert_nothing_captured()
+
+ # verify captured packets
+ self.verify_captured_pkts(output, capture, compare_func)
+
+ def create_packet_header_IPv6(self):
+ """Create packet header: IPv6 header, UDP header
+
+ IPv6 source address is 1234::1
+ IPv6 destination address is 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = IPv6(src='1234::1', dst='4321::1') / UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv6(self, srcaddr, sidlist, segleft):
+ """Create packet header: IPv6 encapsulated in SRv6:
+ IPv6 header with SRH, IPv6 header, UDP header
+
+ :param ipv6address srcaddr: outer IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 source address is set to srcaddr
+ Outer IPv6 destination address is set to sidlist[segleft]
+ Inner IPv6 source address is 1234::1
+ Inner IPv6 destination address is 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=41) / \
+ IPv6(src='1234::1', dst='4321::1') / \
+ UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_IPv4(self):
+ """Create packet header: IPv4 header, UDP header
+
+ IPv4 source address is 123.1.1.1
+ IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = IP(src='123.1.1.1', dst='124.1.1.1') / UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv4(self, srcaddr, sidlist, segleft):
+ """Create packet header: IPv4 encapsulated in SRv6:
+ IPv6 header with SRH, IPv4 header, UDP header
+
+ :param ipv6address srcaddr: outer IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 source address is set to srcaddr
+ Outer IPv6 destination address is set to sidlist[segleft]
+ Inner IPv4 source address is 123.1.1.1
+ Inner IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=4) / \
+ IP(src='123.1.1.1', dst='124.1.1.1') / \
+ UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_L2(self, vlan=0):
+ """Create packet header: L2 header
+
+ :param vlan: if vlan!=0 then add 802.1q header
+ """
+ # Note: the dst addr ('00:55:44:33:22:11') is used in
+ # the compare function compare_rx_tx_packet_T_Encaps_L2
+ # to detect presence of L2 in SRH payload
+ p = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ p /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ p.type = etype
+ return p
+
+ def create_packet_header_IPv6_SRH_L2(self, srcaddr, sidlist, segleft,
+ vlan=0):
+ """Create packet header: L2 encapsulated in SRv6:
+ IPv6 header with SRH, L2
+
+ :param ipv6address srcaddr: IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+ :param vlan: L2 vlan; if vlan!=0 then add 802.1q header
+
+ IPv6 source address is set to srcaddr
+ IPv6 destination address is set to sidlist[segleft]
+ """
+ eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ eth /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ eth.type = etype
+
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=143) / \
+ eth
+ return p
+
+ def get_payload_info(self, packet):
+ """ Extract the payload_info from the packet
+ """
+ # in most cases, payload_info is in packet[Raw]
+ # but packet[Raw] gives the complete payload
+ # (incl L2 header) for the T.Encaps L2 case
+ try:
+ payload_info = self.payload_to_info(packet[Raw])
+
+ except:
+ # remove the L2 header from packet[Raw]:
+ # take packet[Raw], convert it to an Ether layer
+ # and then extract Raw from it
+ payload_info = self.payload_to_info(
+ Ether(scapy.compat.raw(packet[Raw]))[Raw])
+
+ return payload_info
+
+ def verify_captured_pkts(self, dst_if, capture, compare_func):
+ """
+ Verify captured packet stream for specified interface.
+ Compare ingress with egress packets using the specified compare fn
+
+ :param dst_if: egress interface of DUT
+ :param capture: captured packets
+ :param compare_func: function to compare in and out packet
+ """
+ self.logger.info("Verifying capture on interface %s using function %s"
+ % (dst_if.name, compare_func.__name__))
+
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+
+ for packet in capture:
+ try:
+ # extract payload_info from packet's payload
+ payload_info = self.get_payload_info(packet)
+ packet_index = payload_info.index
+
+ self.logger.debug("Verifying packet with index %d"
+ % (packet_index))
+ # packet should have arrived on the expected interface
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on interface %s: src=%u (idx=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+
+ # search for payload_info with same src and dst if_index
+ # this will give us the transmitted packet
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ # next_info should not be None
+ self.assertTrue(next_info is not None)
+ # index of tx and rx packets should be equal
+ self.assertEqual(packet_index, next_info.index)
+ # data field of next_info contains the tx packet
+ txed_packet = next_info.data
+
+ self.logger.debug(ppp("Transmitted packet:",
+ txed_packet)) # ppp=Pretty Print Packet
+
+ self.logger.debug(ppp("Received packet:", packet))
+
+ # compare rcvd packet with expected packet using compare_func
+ compare_func(txed_packet, packet)
+
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # have all expected packets arrived?
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_srv6_ad_flow.py b/test/test_srv6_ad_flow.py
new file mode 100644
index 00000000000..f5452089a79
--- /dev/null
+++ b/test/test_srv6_ad_flow.py
@@ -0,0 +1,637 @@
+#!/usr/bin/env python3
+
+import unittest
+import binascii
+from socket import AF_INET6
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip import DpoProto
+from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpTable
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+
+
+class TestSRv6(VppTestCase):
+ """ SRv6 Flow-based Dynamic Proxy plugin Test Case """
+
+ @classmethod
+    def setUpClass(cls):
+        super(TestSRv6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSRv6, cls).tearDownClass()
+
+ def setUp(self):
+ """ Perform test setup before each test case.
+ """
+ super(TestSRv6, self).setUp()
+
+        # packet sizes, including L2 overhead
+ self.pg_packet_sizes = [64, 512, 1518, 9018]
+
+ # reset packet_infos
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """ Clean up test setup after each test case.
+ """
+ self.teardown_interfaces()
+
+ super(TestSRv6, self).tearDown()
+
+ def configure_interface(self,
+ interface,
+ ipv6=False, ipv4=False,
+ ipv6_table_id=0, ipv4_table_id=0):
+ """ Configure interface.
+ :param ipv6: configure IPv6 on interface
+ :param ipv4: configure IPv4 on interface
+ :param ipv6_table_id: FIB table_id for IPv6
+ :param ipv4_table_id: FIB table_id for IPv4
+ """
+ self.logger.debug("Configuring interface %s" % (interface.name))
+ if ipv6:
+ self.logger.debug("Configuring IPv6")
+ interface.set_table_ip6(ipv6_table_id)
+ interface.config_ip6()
+ interface.resolve_ndp(timeout=5)
+ if ipv4:
+ self.logger.debug("Configuring IPv4")
+ interface.set_table_ip4(ipv4_table_id)
+ interface.config_ip4()
+ interface.resolve_arp()
+ interface.admin_up()
+
+ def setup_interfaces(self, ipv6=[], ipv4=[],
+ ipv6_table_id=[], ipv4_table_id=[]):
+ """ Create and configure interfaces.
+
+ :param ipv6: list of interface IPv6 capabilities
+ :param ipv4: list of interface IPv4 capabilities
+ :param ipv6_table_id: list of intf IPv6 FIB table_ids
+ :param ipv4_table_id: list of intf IPv4 FIB table_ids
+ :returns: List of created interfaces.
+ """
+ # how many interfaces?
+ if len(ipv6):
+ count = len(ipv6)
+ else:
+ count = len(ipv4)
+ self.logger.debug("Creating and configuring %d interfaces" % (count))
+
+ # fill up ipv6 and ipv4 lists if needed
+ # not enabled (False) is the default
+ if len(ipv6) < count:
+ ipv6 += (count - len(ipv6)) * [False]
+ if len(ipv4) < count:
+ ipv4 += (count - len(ipv4)) * [False]
+
+ # fill up table_id lists if needed
+ # table_id 0 (global) is the default
+ if len(ipv6_table_id) < count:
+ ipv6_table_id += (count - len(ipv6_table_id)) * [0]
+ if len(ipv4_table_id) < count:
+ ipv4_table_id += (count - len(ipv4_table_id)) * [0]
+
+ # create 'count' pg interfaces
+ self.create_pg_interfaces(range(count))
+
+ # setup all interfaces
+ for i in range(count):
+ intf = self.pg_interfaces[i]
+ self.configure_interface(intf,
+ ipv6[i], ipv4[i],
+ ipv6_table_id[i], ipv4_table_id[i])
+
+ if any(ipv6):
+ self.logger.debug(self.vapi.cli("show ip6 neighbors"))
+ if any(ipv4):
+ self.logger.debug(self.vapi.cli("show ip4 neighbors"))
+ self.logger.debug(self.vapi.cli("show interface"))
+ self.logger.debug(self.vapi.cli("show hardware"))
+
+ return self.pg_interfaces
+
+ def teardown_interfaces(self):
+ """ Unconfigure and bring down interface.
+ """
+ self.logger.debug("Tearing down interfaces")
+ # tear down all interfaces
+ # AFAIK they cannot be deleted
+ for i in self.pg_interfaces:
+ self.logger.debug("Tear down interface %s" % (i.name))
+ i.admin_down()
+ i.unconfig()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+
+ def test_SRv6_End_AD_IPv6(self):
+ """ Test SRv6 End.AD behavior with IPv6 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a6', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
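+        # the assembled command looks like (illustrative values):
+        #   sr localsid address a2::a6 behavior end.ad.flow
+        #     nh <pg1 remote ip6> oif pg1 iif pg1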
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad.flow" + \
+ " nh " + self.pg1.remote_ip6 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv6 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv6(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv6_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv6 header for returning packets
+ packet_header2 = self.create_packet_header_IPv6()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv6_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv6_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv6
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the hlim field
+ # -> adjust tx'ed hlim to expected hlim
+ tx_ip2.hlim = tx_ip2.hlim - 1
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv6_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the hop-limit field
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # -> update tx'ed hlim to the expected hlim
+ tx_ip.hlim -= 1
+ # -> check payload
+ self.assertEqual(rx_srh.payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def test_SRv6_End_AD_IPv4(self):
+ """ Test SRv6 End.AD behavior with IPv4 traffic.
+ """
+ self.src_addr = 'a0::'
+ self.sid_list = ['a1::', 'a2::a4', 'a3::']
+ self.test_sid_index = 1
+
+ # send traffic to one destination interface
+        # source interface is IPv6 only, destination interface is IPv4 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[False, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, self.sid_list[self.test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index,
+ proto=DpoProto.DPO_PROTO_IP6)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + \
+ self.sid_list[self.test_sid_index] + \
+ " behavior end.ad.flow" + \
+ " nh " + self.pg1.remote_ip4 + \
+ " oif " + self.pg1.name + \
+ " iif " + self.pg1.name
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv4 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv4(
+ srcaddr=self.src_addr,
+ sidlist=self.sid_list[::-1],
+ segleft=len(self.sid_list) - self.test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AD_IPv4_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+        # prepare IPv4 header for returning packets
+ packet_header2 = self.create_packet_header_IPv4()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AD_IPv4_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ cli_str = "sr localsid del address " + \
+ self.sid_list[self.test_sid_index]
+ self.vapi.cli(cli_str)
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AD_IPv4_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD with IPv4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get IPv4 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IP)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IP)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip2.ttl = tx_ip2.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip2.chksum = None
+        # read back the pkt (with raw()) to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip2 = IP(scapy.compat.raw(tx_ip2))
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AD_IPv4_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AD
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.src_addr)
+ # received ip.dst should be equal to expected sidlist next segment
+ self.assertEqual(rx_ip.dst, self.sid_list[self.test_sid_index + 1])
+
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to SID-list in reversed order
+ self.assertEqual(rx_srh.addresses, self.sid_list[::-1])
+ # segleft should be equal to previous segleft value minus 1
+ self.assertEqual(rx_srh.segleft,
+ len(self.sid_list) - self.test_sid_index - 2)
+ # lastentry should be equal to the SID-list length minus 1
+ self.assertEqual(rx_srh.lastentry, len(self.sid_list) - 1)
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the ttl field and ip checksum
+ tx_ip = tx_pkt.getlayer(IP)
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip.ttl = tx_ip.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip.chksum = None
+        # -> read back the pkt (with raw()) to force computing these fields
+ # probably other ways to accomplish this are possible
+ self.assertEqual(rx_srh.payload, IP(scapy.compat.raw(tx_ip)))
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def create_stream(self, src_if, dst_if, packet_header, packet_sizes,
+ count):
+ """Create SRv6 input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for
+ :param VppInterface dst_if: destination interface of packet stream
+ :param packet_header: Layer3 scapy packet headers,
+ L2 is added when not provided,
+ Raw(payload) with packet_info is added
+        :param list packet_sizes: packet stream packet sizes, applied
+                                  sequentially to packets in the stream
+ :param int count: number of packets in packet stream
+ :return: list of packets
+ """
+ self.logger.info("Creating packets")
+ pkts = []
+ for i in range(0, count - 1):
+ payload_info = self.create_packet_info(src_if, dst_if)
+ self.logger.debug(
+ "Creating packet with index %d" % (payload_info.index))
+ payload = self.info_to_payload(payload_info)
+ # add L2 header if not yet provided in packet_header
+ if packet_header.getlayer(0).name == 'Ethernet':
+ p = packet_header / Raw(payload)
+ else:
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac) / \
+ packet_header / Raw(payload)
+ size = packet_sizes[i % len(packet_sizes)]
+ self.logger.debug("Packet size %d" % (size))
+ self.extend_packet(p, size)
+ # we need to store the packet with the automatic fields computed
+ # read back the dumped packet (with str())
+ # to force computing these fields
+ # probably other ways are possible
+ p = Ether(scapy.compat.raw(p))
+ payload_info.data = p.copy()
+ self.logger.debug(ppp("Created packet:", p))
+ pkts.append(p)
+ self.logger.info("Done creating packets")
+ return pkts
+
+ def send_and_verify_pkts(self, input, pkts, output, compare_func):
+ """Send packets and verify received packets using compare_func
+
+ :param input: ingress interface of DUT
+ :param pkts: list of packets to transmit
+ :param output: egress interface of DUT
+ :param compare_func: function to compare in and out packets
+ """
+ # add traffic stream to input interface
+ input.add_stream(pkts)
+
+ # enable capture on all interfaces
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # start traffic
+ self.logger.info("Starting traffic")
+ self.pg_start()
+
+ # get output capture
+ self.logger.info("Getting packet capture")
+ capture = output.get_capture()
+
+ # assert nothing was captured on input interface
+ # input.assert_nothing_captured()
+
+ # verify captured packets
+ self.verify_captured_pkts(output, capture, compare_func)
+
+ def create_packet_header_IPv6(self, saddr='1234::1', daddr='4321::1',
+ sport=1234, dport=1234):
+ """Create packet header: IPv6 header, UDP header
+
+        :param saddr: IPv6 source address (default 1234::1)
+        :param daddr: IPv6 destination address (default 4321::1)
+        :param sport: UDP source port (default 1234)
+        :param dport: UDP destination port (default 1234)
+ """
+
+ p = IPv6(src=saddr, dst=daddr) / UDP(sport=sport, dport=dport)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv6(self, srcaddr, sidlist, segleft,
+ insrc='1234::1', indst='4321::1',
+ sport=1234, dport=1234):
+ """Create packet header: IPv6 encapsulated in SRv6:
+ IPv6 header with SRH, IPv6 header, UDP header
+
+        :param str srcaddr: outer IPv6 source address
+        :param list sidlist: segment list of outer IPv6 SRH
+        :param int segleft: segments-left field of outer IPv6 SRH
+        :param insrc: inner IPv6 source address (default 1234::1)
+        :param indst: inner IPv6 destination address (default 4321::1)
+        :param sport: UDP source port (default 1234)
+        :param dport: UDP destination port (default 1234)
+
+        Outer IPv6 source address is set to srcaddr
+        Outer IPv6 destination address is set to sidlist[segleft]
+ """
+
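+        # nh=41 marks the SRH payload as IPv6 (IP protocol numbers:
+        # 41 = IPv6, 4 = IPv4, 143 = Ethernet)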
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=41) / \
+ IPv6(src=insrc, dst=indst) / \
+ UDP(sport=sport, dport=dport)
+ return p
+
+ def create_packet_header_IPv4(self):
+ """Create packet header: IPv4 header, UDP header
+
+ IPv4 source address is 123.1.1.1
+ IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = IP(src='123.1.1.1', dst='124.1.1.1') / UDP(sport=1234, dport=1234)
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv4(self, srcaddr, sidlist, segleft):
+ """Create packet header: IPv4 encapsulated in SRv6:
+ IPv6 header with SRH, IPv4 header, UDP header
+
+        :param str srcaddr: outer IPv6 source address
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 source address is set to srcaddr
+ Outer IPv6 destination address is set to sidlist[segleft]
+ Inner IPv4 source address is 123.1.1.1
+ Inner IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = IPv6(src=srcaddr, dst=sidlist[segleft]) / \
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=4) / \
+ IP(src='123.1.1.1', dst='124.1.1.1') / \
+ UDP(sport=1234, dport=1234)
+ return p
+
+ def get_payload_info(self, packet):
+ """ Extract the payload_info from the packet
+ """
+ # in most cases, payload_info is in packet[Raw]
+ # but packet[Raw] gives the complete payload
+ # (incl L2 header) for the T.Encaps L2 case
+ try:
+ payload_info = self.payload_to_info(packet[Raw])
+
+ except:
+            # remove the L2 header from packet[Raw]:
+ # take packet[Raw], convert it to an Ether layer
+ # and then extract Raw from it
+ payload_info = self.payload_to_info(
+ Ether(scapy.compat.raw(packet[Raw]))[Raw])
+
+ return payload_info
+
+ def verify_captured_pkts(self, dst_if, capture, compare_func):
+ """
+ Verify captured packet stream for specified interface.
+ Compare ingress with egress packets using the specified compare fn
+
+ :param dst_if: egress interface of DUT
+ :param capture: captured packets
+ :param compare_func: function to compare in and out packet
+ """
+ self.logger.info("Verifying capture on interface %s using function %s"
+ % (dst_if.name, compare_func.__name__))
+
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+
+ for packet in capture:
+ try:
+ # extract payload_info from packet's payload
+ payload_info = self.get_payload_info(packet)
+ packet_index = payload_info.index
+
+ self.logger.debug("Verifying packet with index %d"
+ % (packet_index))
+ # packet should have arrived on the expected interface
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on interface %s: src=%u (idx=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+
+ # search for payload_info with same src and dst if_index
+ # this will give us the transmitted packet
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ # next_info should not be None
+ self.assertTrue(next_info is not None)
+ # index of tx and rx packets should be equal
+ self.assertEqual(packet_index, next_info.index)
+ # data field of next_info contains the tx packet
+ txed_packet = next_info.data
+
+ self.logger.debug(ppp("Transmitted packet:",
+ txed_packet)) # ppp=Pretty Print Packet
+
+ self.logger.debug(ppp("Received packet:", packet))
+
+ # compare rcvd packet with expected packet using compare_func
+ compare_func(txed_packet, packet)
+
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # have all expected packets arrived?
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_srv6_as.py b/test/test_srv6_as.py
new file mode 100755
index 00000000000..eec44e31ee5
--- /dev/null
+++ b/test/test_srv6_as.py
@@ -0,0 +1,887 @@
+#!/usr/bin/env python3
+
+import unittest
+import binascii
+from socket import AF_INET6
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto, VppIpTable
+from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \
+ SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes
+
+import scapy.compat
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+
+
+class TestSRv6(VppTestCase):
+ """ SRv6 Static Proxy plugin Test Case """
+
+ @classmethod
+    def setUpClass(cls):
+        super(TestSRv6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSRv6, cls).tearDownClass()
+
+ def setUp(self):
+ """ Perform test setup before each test case.
+ """
+ super(TestSRv6, self).setUp()
+
+        # packet sizes, including L2 overhead
+ self.pg_packet_sizes = [64, 512, 1518, 9018]
+
+ # reset packet_infos
+ self.reset_packet_infos()
+
+ def tearDown(self):
+ """ Clean up test setup after each test case.
+ """
+ self.teardown_interfaces()
+
+ super(TestSRv6, self).tearDown()
+
+ def configure_interface(self,
+ interface,
+ ipv6=False, ipv4=False,
+ ipv6_table_id=0, ipv4_table_id=0):
+ """ Configure interface.
+ :param ipv6: configure IPv6 on interface
+ :param ipv4: configure IPv4 on interface
+ :param ipv6_table_id: FIB table_id for IPv6
+ :param ipv4_table_id: FIB table_id for IPv4
+ """
+ self.logger.debug("Configuring interface %s" % (interface.name))
+ if ipv6:
+ self.logger.debug("Configuring IPv6")
+ interface.set_table_ip6(ipv6_table_id)
+ interface.config_ip6()
+ interface.resolve_ndp(timeout=5)
+ if ipv4:
+ self.logger.debug("Configuring IPv4")
+ interface.set_table_ip4(ipv4_table_id)
+ interface.config_ip4()
+ interface.resolve_arp()
+ interface.admin_up()
+
+ def setup_interfaces(self, ipv6=[], ipv4=[],
+ ipv6_table_id=[], ipv4_table_id=[]):
+ """ Create and configure interfaces.
+
+ :param ipv6: list of interface IPv6 capabilities
+ :param ipv4: list of interface IPv4 capabilities
+ :param ipv6_table_id: list of intf IPv6 FIB table_ids
+ :param ipv4_table_id: list of intf IPv4 FIB table_ids
+ :returns: List of created interfaces.
+ """
+ # how many interfaces?
+ if len(ipv6):
+ count = len(ipv6)
+ else:
+ count = len(ipv4)
+ self.logger.debug("Creating and configuring %d interfaces" % (count))
+
+ # fill up ipv6 and ipv4 lists if needed
+ # not enabled (False) is the default
+ if len(ipv6) < count:
+ ipv6 += (count - len(ipv6)) * [False]
+ if len(ipv4) < count:
+ ipv4 += (count - len(ipv4)) * [False]
+
+ # fill up table_id lists if needed
+ # table_id 0 (global) is the default
+ if len(ipv6_table_id) < count:
+ ipv6_table_id += (count - len(ipv6_table_id)) * [0]
+ if len(ipv4_table_id) < count:
+ ipv4_table_id += (count - len(ipv4_table_id)) * [0]
+
+ # create 'count' pg interfaces
+ self.create_pg_interfaces(range(count))
+
+ # setup all interfaces
+ for i in range(count):
+ intf = self.pg_interfaces[i]
+ self.configure_interface(intf,
+ ipv6[i], ipv4[i],
+ ipv6_table_id[i], ipv4_table_id[i])
+
+ if any(ipv6):
+ self.logger.debug(self.vapi.cli("show ip6 neighbors"))
+ if any(ipv4):
+ self.logger.debug(self.vapi.cli("show ip4 neighbors"))
+ self.logger.debug(self.vapi.cli("show interface"))
+ self.logger.debug(self.vapi.cli("show hardware"))
+
+ return self.pg_interfaces
+
+ def teardown_interfaces(self):
+ """ Unconfigure and bring down interface.
+ """
+ self.logger.debug("Tearing down interfaces")
+ # tear down all interfaces
+ # AFAIK they cannot be deleted
+ for i in self.pg_interfaces:
+ self.logger.debug("Tear down interface %s" % (i.name))
+ i.admin_down()
+ i.unconfig()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+
+ def test_SRv6_End_AS_IPv6_noSRH(self):
+ """ Test SRv6 End.AS behavior with IPv6 traffic and no SRH rewrite.
+ """
+ self.run_SRv6_End_AS_IPv6(
+ sid_list=['a1::', 'a2::a6', 'a3::'],
+ test_sid_index=1,
+ rewrite_src_addr='a2::')
+
+ def test_SRv6_End_AS_IPv6_SRH(self):
+ """ Test SRv6 End.AS behavior with IPv6 traffic and SRH rewrite.
+ """
+ self.run_SRv6_End_AS_IPv6(
+ sid_list=['a1::a6', 'a2::', 'a3::'],
+ test_sid_index=0,
+ rewrite_src_addr='a1::')
+
+ def test_SRv6_End_AS_IPv4_noSRH(self):
+ """ Test SRv6 End.AS behavior with IPv4 traffic and no SRH rewrite.
+ """
+ self.run_SRv6_End_AS_IPv4(
+ sid_list=['a1::', 'a2::a6', 'a3::'],
+ test_sid_index=1,
+ rewrite_src_addr='a2::')
+
+ def test_SRv6_End_AS_IPv4_SRH(self):
+ """ Test SRv6 End.AS behavior with IPv4 traffic and SRH rewrite.
+ """
+ self.run_SRv6_End_AS_IPv4(
+ sid_list=['a1::a6', 'a2::', 'a3::'],
+ test_sid_index=0,
+ rewrite_src_addr='a1::')
+
+ def test_SRv6_End_AS_L2_noSRH(self):
+ """ Test SRv6 End.AS behavior with L2 traffic and no SRH rewrite.
+ """
+ self.run_SRv6_End_AS_L2(
+ sid_list=['a1::', 'a2::a6', 'a3::'],
+ test_sid_index=1,
+ rewrite_src_addr='a2::')
+
+ def test_SRv6_End_AS_L2_SRH(self):
+ """ Test SRv6 End.AS behavior with L2 traffic and SRH rewrite.
+ """
+ self.run_SRv6_End_AS_L2(
+ sid_list=['a1::a6', 'a2::', 'a3::'],
+ test_sid_index=0,
+ rewrite_src_addr='a1::')
+
+ def run_SRv6_End_AS_L2(self, sid_list, test_sid_index, rewrite_src_addr):
+ """ Run SRv6 End.AS test with L2 traffic.
+ """
+ self.rewrite_src_addr = rewrite_src_addr
+ self.rewrite_sid_list = sid_list[test_sid_index + 1::]
+
+ # send traffic to one destination interface
+        # source interface is IPv6 only, destination interface is L2 only
+ self.setup_interfaces(ipv6=[True, False])
+
+ # configure route to next segment
+ route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
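+        # the assembled command looks like (illustrative values):
+        #   sr localsid address a2::a6 behavior end.as
+        #     oif pg1 iif pg1 src a2:: next a3::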
+ cli_str = "sr localsid address " + sid_list[test_sid_index] \
+ + " behavior end.as" \
+ + " oif " + self.pg1.name \
+ + " iif " + self.pg1.name \
+ + " src " + self.rewrite_src_addr
+ for s in self.rewrite_sid_list:
+ cli_str += " next " + s
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare L2 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_L2(
+ sidlist=sid_list[::-1],
+ segleft=len(sid_list) - test_sid_index - 1,
+ vlan=0)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AS_L2_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare L2 header for returning packets
+ packet_header2 = self.create_packet_header_L2()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AS_L2_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ self.vapi.cli("sr localsid del address " + sid_list[test_sid_index])
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def run_SRv6_End_AS_IPv6(self, sid_list, test_sid_index, rewrite_src_addr):
+ """ Run SRv6 End.AS test with IPv6 traffic.
+ """
+ self.rewrite_src_addr = rewrite_src_addr
+ self.rewrite_sid_list = sid_list[test_sid_index + 1::]
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + sid_list[test_sid_index] \
+ + " behavior end.as" \
+ + " nh " + self.pg1.remote_ip6 \
+ + " oif " + self.pg1.name \
+ + " iif " + self.pg1.name \
+ + " src " + self.rewrite_src_addr
+ for s in self.rewrite_sid_list:
+ cli_str += " next " + s
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv6 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv6(
+ sidlist=sid_list[::-1],
+ segleft=len(sid_list) - test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AS_IPv6_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # prepare IPv6 header for returning packets
+ packet_header2 = self.create_packet_header_IPv6()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AS_IPv6_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ self.vapi.cli("sr localsid del address " + sid_list[test_sid_index])
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def run_SRv6_End_AS_IPv4(self, sid_list, test_sid_index, rewrite_src_addr):
+ """ Run SRv6 End.AS test with IPv4 traffic.
+ """
+ self.rewrite_src_addr = rewrite_src_addr
+ self.rewrite_sid_list = sid_list[test_sid_index + 1::]
+
+ # send traffic to one destination interface
+ # source and destination interfaces are IPv6 only
+ self.setup_interfaces(ipv6=[True, False], ipv4=[True, True])
+
+ # configure route to next segment
+ route = VppIpRoute(self, sid_list[test_sid_index + 1], 128,
+ [VppRoutePath(self.pg0.remote_ip6,
+ self.pg0.sw_if_index)])
+ route.add_vpp_config()
+
+ # configure SRv6 localSID behavior
+ cli_str = "sr localsid address " + sid_list[test_sid_index] \
+ + " behavior end.as" \
+ + " nh " + self.pg1.remote_ip4 \
+ + " oif " + self.pg1.name \
+ + " iif " + self.pg1.name \
+ + " src " + self.rewrite_src_addr
+ for s in self.rewrite_sid_list:
+ cli_str += " next " + s
+ self.vapi.cli(cli_str)
+
+ # log the localsids
+ self.logger.debug(self.vapi.cli("show sr localsid"))
+
+ # send one packet per packet size
+ count = len(self.pg_packet_sizes)
+
+ # prepare IPv4 in SRv6 headers
+ packet_header1 = self.create_packet_header_IPv6_SRH_IPv4(
+ sidlist=sid_list[::-1],
+ segleft=len(sid_list) - test_sid_index - 1)
+
+ # generate packets (pg0->pg1)
+ pkts1 = self.create_stream(self.pg0, self.pg1, packet_header1,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg0, pkts1, self.pg1,
+ self.compare_rx_tx_packet_End_AS_IPv4_out)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+        # prepare IPv4 header for returning packets
+ packet_header2 = self.create_packet_header_IPv4()
+
+ # generate returning packets (pg1->pg0)
+ pkts2 = self.create_stream(self.pg1, self.pg0, packet_header2,
+ self.pg_packet_sizes, count)
+
+ # send packets and verify received packets
+ self.send_and_verify_pkts(self.pg1, pkts2, self.pg0,
+ self.compare_rx_tx_packet_End_AS_IPv4_in)
+
+ # log the localsid counters
+ self.logger.info(self.vapi.cli("show sr localsid"))
+
+ # remove SRv6 localSIDs
+ self.vapi.cli("sr localsid del address " + sid_list[test_sid_index])
+
+ # cleanup interfaces
+ self.teardown_interfaces()
+
+ def compare_rx_tx_packet_End_AS_IPv6_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+
+ # expected segment-list (SRH order)
+ tx_seglist = self.rewrite_sid_list[::-1]
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.rewrite_src_addr)
+ # received ip.dst should be equal to expected sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+
+ if len(tx_seglist) > 1:
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to expected seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+            # segleft should be equal to size of expected seglist - 1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+ # get payload
+ payload = rx_srh.payload
+ else:
+ # rx'ed packet should NOT have SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get payload
+ payload = rx_ip.payload
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the hop-limit field
+ # -> update tx'ed hlim to the expected hlim
+ tx_ip.hlim = tx_ip.hlim - 1
+
+ self.assertEqual(payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AS_IPv4_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ip = tx_pkt.getlayer(IP)
+
+ # expected segment-list (SRH order)
+ tx_seglist = self.rewrite_sid_list[::-1]
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.rewrite_src_addr)
+ # received ip.dst should be equal to expected sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+
+ if len(tx_seglist) > 1:
+ # rx'ed packet should have SRH and IPv4 header
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ self.assertTrue(rx_ip.payload.haslayer(IP))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+            # segleft should be equal to size of seglist - 1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+ payload = rx_srh.payload
+ else:
+ # rx'ed packet should NOT have SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get payload
+ payload = rx_ip.payload
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip.ttl = tx_ip.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip.chksum = None
+        # read back the pkt (with raw()) to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip = IP(scapy.compat.raw(tx_ip))
+
+ self.assertEqual(payload, tx_ip)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AS_L2_in(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+ rx_srh = None
+
+ tx_ether = tx_pkt.getlayer(Ether)
+
+ # expected segment-list (SRH order)
+ tx_seglist = self.rewrite_sid_list[::-1]
+
+ # received ip.src should be equal to SR Policy source
+ self.assertEqual(rx_ip.src, self.rewrite_src_addr)
+ # received ip.dst should be equal to expected sidlist[lastentry]
+ self.assertEqual(rx_ip.dst, tx_seglist[-1])
+
+ if len(tx_seglist) > 1:
+ # rx'ed packet should have SRH
+ self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get SRH
+ rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting)
+ # rx'ed seglist should be equal to seglist
+ self.assertEqual(rx_srh.addresses, tx_seglist)
+            # segleft should be equal to size of seglist - 1
+ self.assertEqual(rx_srh.segleft, len(tx_seglist)-1)
+ # segleft should be equal to lastentry
+ self.assertEqual(rx_srh.segleft, rx_srh.lastentry)
+            # nh should be Ethernet (143)
+ self.assertEqual(rx_srh.nh, 143)
+ # get payload
+ payload = rx_srh.payload
+ else:
+ # rx'ed packet should NOT have SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+ # get payload
+ payload = rx_ip.payload
+
+ # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt
+ self.assertEqual(Ether(scapy.compat.raw(payload)), tx_ether)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AS_IPv6_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS with IPv6
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get first (outer) IPv6 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IPv6)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IPv6, 2)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the hlim field
+ # -> adjust tx'ed hlim to expected hlim
+ tx_ip2.hlim = tx_ip2.hlim - 1
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AS_IPv4_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS with IPv4
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+ # get IPv4 header of rx'ed packet
+ rx_ip = rx_pkt.getlayer(IP)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ tx_ip2 = tx_pkt.getlayer(IP)
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_ip pkt should be equal to tx_ip2
+ # except for the ttl field and ip checksum
+ # -> adjust tx'ed ttl to expected ttl
+ tx_ip2.ttl = tx_ip2.ttl - 1
+ # -> set tx'ed ip checksum to None and let scapy recompute
+ tx_ip2.chksum = None
+        # read back the pkt (with raw()) to force computing these fields
+ # probably other ways to accomplish this are possible
+ tx_ip2 = IP(scapy.compat.raw(tx_ip2))
+
+ self.assertEqual(rx_ip, tx_ip2)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def compare_rx_tx_packet_End_AS_L2_out(self, tx_pkt, rx_pkt):
+ """ Compare input and output packet after passing End.AS with L2
+
+ :param tx_pkt: transmitted packet
+ :param rx_pkt: received packet
+ """
+
+        # get Ether header of rx'ed packet
+ rx_eth = rx_pkt.getlayer(Ether)
+
+ tx_ip = tx_pkt.getlayer(IPv6)
+ # we can't just get the 2nd Ether layer
+ # get the Raw content and dissect it as Ether
+ tx_eth1 = Ether(scapy.compat.raw(tx_pkt[Raw]))
+
+ # verify if rx'ed packet has no SRH
+ self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting))
+
+ # the whole rx_eth pkt should be equal to tx_eth1
+ self.assertEqual(rx_eth, tx_eth1)
+
+ self.logger.debug("packet verification: SUCCESS")
+
+ def create_stream(self, src_if, dst_if, packet_header, packet_sizes,
+ count):
+ """Create SRv6 input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for
+ :param VppInterface dst_if: destination interface of packet stream
+ :param packet_header: Layer3 scapy packet headers,
+ L2 is added when not provided,
+ Raw(payload) with packet_info is added
+        :param list packet_sizes: packet stream packet sizes, applied
+                                  sequentially to packets in the stream
+ :param int count: number of packets in packet stream
+ :return: list of packets
+ """
+ self.logger.info("Creating packets")
+ pkts = []
+ for i in range(0, count-1):
+ payload_info = self.create_packet_info(src_if, dst_if)
+ self.logger.debug(
+ "Creating packet with index %d" % (payload_info.index))
+ payload = self.info_to_payload(payload_info)
+ # add L2 header if not yet provided in packet_header
+ if packet_header.getlayer(0).name == 'Ethernet':
+ p = (packet_header /
+ Raw(payload))
+ else:
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ packet_header /
+ Raw(payload))
+ size = packet_sizes[i % len(packet_sizes)]
+ self.logger.debug("Packet size %d" % (size))
+ self.extend_packet(p, size)
+ # we need to store the packet with the automatic fields computed
+ # read back the dumped packet (with str())
+ # to force computing these fields
+ # probably other ways are possible
+ p = Ether(scapy.compat.raw(p))
+ payload_info.data = p.copy()
+ self.logger.debug(ppp("Created packet:", p))
+ pkts.append(p)
+ self.logger.info("Done creating packets")
+ return pkts
+
+ def send_and_verify_pkts(self, input, pkts, output, compare_func):
+ """Send packets and verify received packets using compare_func
+
+ :param input: ingress interface of DUT
+ :param pkts: list of packets to transmit
+ :param output: egress interface of DUT
+ :param compare_func: function to compare in and out packets
+ """
+ # add traffic stream to input interface
+ input.add_stream(pkts)
+
+ # enable capture on all interfaces
+ self.pg_enable_capture(self.pg_interfaces)
+
+ # start traffic
+ self.logger.info("Starting traffic")
+ self.pg_start()
+
+ # get output capture
+ self.logger.info("Getting packet capture")
+ capture = output.get_capture()
+
+ # assert nothing was captured on input interface
+ # input.assert_nothing_captured()
+
+ # verify captured packets
+ self.verify_captured_pkts(output, capture, compare_func)
+
+ def create_packet_header_IPv6(self):
+ """Create packet header: IPv6 header, UDP header
+
+ IPv6 source address is 1234::1
+ IPv6 destination address is 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst='4321::1') /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv6(self, sidlist, segleft):
+ """Create packet header: IPv6 encapsulated in SRv6:
+ IPv6 header with SRH, IPv6 header, UDP header
+
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 source address is set to 5678::1
+ Outer IPv6 destination address is set to sidlist[segleft]
+        IPv6 source address is 1234::1
+ IPv6 destination address is 4321::1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='5678::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=41) /
+ IPv6(src='1234::1', dst='4321::1') /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv4(self):
+ """Create packet header: IPv4 header, UDP header
+
+ IPv4 source address is 123.1.1.1
+ IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IP(src='123.1.1.1', dst='124.1.1.1') /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_IPv6_SRH_IPv4(self, sidlist, segleft):
+ """Create packet header: IPv4 encapsulated in SRv6:
+ IPv6 header with SRH, IPv4 header, UDP header
+
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+
+ Outer IPv6 destination address is set to sidlist[segleft]
+ IPv6 source address is 1234::1
+ IPv4 source address is 123.1.1.1
+ IPv4 destination address is 124.1.1.1
+ UDP source port and destination port are 1234
+ """
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=4) /
+ IP(src='123.1.1.1', dst='124.1.1.1') /
+ UDP(sport=1234, dport=1234))
+ return p
+
+ def create_packet_header_L2(self, vlan=0):
+ """Create packet header: L2 header
+
+ :param vlan: if vlan!=0 then add 802.1q header
+ """
+        # Note: the dst addr ('00:55:44:33:22:11') is used in
+        # the End.AS L2 compare functions to detect the presence
+        # of the L2 header in the SRH payload
+ p = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ p /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ p.type = etype
+ return p
+
+ def create_packet_header_IPv6_SRH_L2(self, sidlist, segleft, vlan=0):
+ """Create packet header: L2 encapsulated in SRv6:
+ IPv6 header with SRH, L2
+
+ :param list sidlist: segment list of outer IPv6 SRH
+ :param int segleft: segments-left field of outer IPv6 SRH
+ :param vlan: L2 vlan; if vlan!=0 then add 802.1q header
+
+ Outer IPv6 destination address is set to sidlist[segleft]
+ IPv6 source address is 1234::1
+ """
+ eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11')
+ etype = 0x8137 # IPX
+ if vlan:
+ # add 802.1q layer
+ eth /= Dot1Q(vlan=vlan, type=etype)
+ else:
+ eth.type = etype
+
+ p = (IPv6(src='1234::1', dst=sidlist[segleft]) /
+ IPv6ExtHdrSegmentRouting(addresses=sidlist,
+ segleft=segleft, nh=143) /
+ eth)
+ return p
+
+ def get_payload_info(self, packet):
+ """ Extract the payload_info from the packet
+ """
+ # in most cases, payload_info is in packet[Raw]
+ # but packet[Raw] gives the complete payload
+ # (incl L2 header) for the T.Encaps L2 case
+ try:
+ payload_info = self.payload_to_info(packet[Raw])
+
+ except:
+ # remote L2 header from packet[Raw]:
+ # take packet[Raw], convert it to an Ether layer
+ # and then extract Raw from it
+ payload_info = self.payload_to_info(
+ Ether(scapy.compat.raw(packet[Raw]))[Raw])
+
+ return payload_info
+
+ def verify_captured_pkts(self, dst_if, capture, compare_func):
+ """
+ Verify captured packet stream for specified interface.
+ Compare ingress with egress packets using the specified compare fn
+
+ :param dst_if: egress interface of DUT
+ :param capture: captured packets
+ :param compare_func: function to compare in and out packet
+ """
+ self.logger.info("Verifying capture on interface %s using function %s"
+ % (dst_if.name, compare_func.__name__))
+
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+
+ for packet in capture:
+ try:
+ # extract payload_info from packet's payload
+ payload_info = self.get_payload_info(packet)
+ packet_index = payload_info.index
+
+ self.logger.debug("Verifying packet with index %d"
+ % (packet_index))
+ # packet should have arrived on the expected interface
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on interface %s: src=%u (idx=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+
+ # search for payload_info with same src and dst if_index
+ # this will give us the transmitted packet
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ # next_info should not be None
+ self.assertTrue(next_info is not None)
+ # index of tx and rx packets should be equal
+ self.assertEqual(packet_index, next_info.index)
+ # data field of next_info contains the tx packet
+ txed_packet = next_info.data
+
+ self.logger.debug(ppp("Transmitted packet:",
+ txed_packet)) # ppp=Pretty Print Packet
+
+ self.logger.debug(ppp("Received packet:", packet))
+
+ # compare rcvd packet with expected packet using compare_func
+ compare_func(txed_packet, packet)
+
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # have all expected packets arrived?
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_srv6_mobile.py b/test/test_srv6_mobile.py
new file mode 100644
index 00000000000..a695c9d7115
--- /dev/null
+++ b/test/test_srv6_mobile.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python3
+
+from framework import VppTestCase
+from ipaddress import IPv4Address
+from ipaddress import IPv6Address
+from scapy.contrib.gtp import *
+from scapy.all import *
+
+
+class TestSRv6EndMGTP4E(VppTestCase):
+ """ SRv6 End.M.GTP4.E (SRv6 -> GTP-U) """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6EndMGTP4E, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.pg_if_i = cls.pg_interfaces[0]
+ cls.pg_if_o = cls.pg_interfaces[1]
+
+ cls.pg_if_i.config_ip6()
+ cls.pg_if_o.config_ip4()
+
+ cls.ip4_dst = cls.pg_if_o.remote_ip4
+ # cls.ip4_src = cls.pg_if_o.local_ip4
+ cls.ip4_src = "192.168.192.10"
+
+ for pg_if in cls.pg_interfaces:
+ pg_if.admin_up()
+ pg_if.resolve_arp()
+
+ except Exception:
+ super(TestSRv6EndMGTP4E, cls).tearDownClass()
+ raise
+
+ def create_packets(self, inner):
+
+ ip4_dst = IPv4Address(str(self.ip4_dst))
+ # 32bit prefix + 32bit IPv4 DA + 8bit + 32bit TEID + 24bit
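+        # e.g. with IPv4 DA 192.0.2.1 (illustrative) this yields the SID
+        # aaaa:aaaa:c000:201:11bb:bbbb:bb11:1111 (TEID 0xbbbbbbbb)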
+ dst = b'\xaa' * 4 + ip4_dst.packed + \
+ b'\x11' + b'\xbb' * 4 + b'\x11' * 3
+ ip6_dst = IPv6Address(dst)
+
+ ip4_src = IPv4Address(str(self.ip4_src))
+ # 64bit prefix + 32bit IPv4 SA + 16 bit port + 16bit
+ src = b'\xcc' * 8 + ip4_src.packed + \
+ b'\xdd' * 2 + b'\x11' * 2
+ ip6_src = IPv6Address(src)
+
+ self.logger.info("ip4 dst: {}".format(ip4_dst))
+ self.logger.info("ip4 src: {}".format(ip4_src))
+ self.logger.info("ip6 dst (remote srgw): {}".format(ip6_dst))
+ self.logger.info("ip6 src (local srgw): {}".format(ip6_src))
+
+ pkts = list()
+ for d, s in inner:
+ pkt = (Ether() /
+ IPv6(dst=str(ip6_dst), src=str(ip6_src)) /
+ IPv6ExtHdrSegmentRouting() /
+ IPv6(dst=d, src=s) /
+ UDP(sport=1000, dport=23))
+ self.logger.info(pkt.show2(dump=True))
+ pkts.append(pkt)
+
+ return pkts
+
+ def test_srv6_mobile(self):
+ """ test_srv6_mobile """
+ pkts = self.create_packets([("A::1", "B::1"), ("C::1", "D::1")])
+
+ self.vapi.cli(
+ "sr localsid address {} behavior end.m.gtp4.e v4src_position 64"
+ .format(pkts[0]['IPv6'].dst))
+ self.logger.info(self.vapi.cli("show sr localsids"))
+
+ self.vapi.cli("clear errors")
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.logger.info(self.vapi.cli("show errors"))
+ self.logger.info(self.vapi.cli("show int address"))
+
+ capture = self.pg1.get_capture(len(pkts))
+
+ for pkt in capture:
+ self.logger.info(pkt.show2(dump=True))
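+            # outer IPv4 SA/DA and the GTP-U TEID are recovered from the
+            # bit patterns embedded in the SIDs by create_packets()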
+ self.assertEqual(pkt[IP].dst, self.ip4_dst)
+ self.assertEqual(pkt[IP].src, self.ip4_src)
+ self.assertEqual(pkt[GTP_U_Header].teid, 0xbbbbbbbb)
+
+
+class TestSRv6TMGTP4D(VppTestCase):
+ """ SRv6 T.M.GTP4.D (GTP-U -> SRv6) """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6TMGTP4D, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.pg_if_i = cls.pg_interfaces[0]
+ cls.pg_if_o = cls.pg_interfaces[1]
+
+ cls.pg_if_i.config_ip4()
+ cls.pg_if_i.config_ip6()
+ cls.pg_if_o.config_ip4()
+ cls.pg_if_o.config_ip6()
+
+ cls.ip4_dst = "1.1.1.1"
+ cls.ip4_src = "2.2.2.2"
+
+ cls.ip6_dst = cls.pg_if_o.remote_ip6
+
+ for pg_if in cls.pg_interfaces:
+ pg_if.admin_up()
+ pg_if.resolve_arp()
+ pg_if.resolve_ndp(timeout=5)
+
+ except Exception:
+ super(TestSRv6TMGTP4D, cls).tearDownClass()
+ raise
+
+ def create_packets(self, inner):
+
+ ip4_dst = IPv4Address(str(self.ip4_dst))
+
+ ip4_src = IPv4Address(str(self.ip4_src))
+
+ self.logger.info("ip4 dst: {}".format(ip4_dst))
+ self.logger.info("ip4 src: {}".format(ip4_src))
+
+ pkts = list()
+ for d, s in inner:
+ pkt = (Ether() /
+ IP(dst=str(ip4_dst), src=str(ip4_src)) /
+ UDP(sport=2152, dport=2152) /
+ GTP_U_Header(gtp_type="g_pdu", teid=200) /
+ IPv6(dst=d, src=s) /
+ UDP(sport=1000, dport=23))
+ self.logger.info(pkt.show2(dump=True))
+ pkts.append(pkt)
+
+ return pkts
+
+ def test_srv6_mobile(self):
+ """ test_srv6_mobile """
+ pkts = self.create_packets([("A::1", "B::1"), ("C::1", "D::1")])
+
+ self.vapi.cli("set sr encaps source addr A1::1")
+ self.vapi.cli("sr policy add bsid D4:: next D2:: next D3::")
+ self.vapi.cli(
+ "sr policy add bsid D5:: behavior t.m.gtp4.d"
+ "D4::/32 v6src_prefix C1::/64 nhtype ipv6")
+ self.vapi.cli("sr steer l3 {}/32 via bsid D5::".format(self.ip4_dst))
+ self.vapi.cli("ip route add D2::/32 via {}".format(self.ip6_dst))
+
+ self.logger.info(self.vapi.cli("show sr steer"))
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ self.vapi.cli("clear errors")
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.logger.info(self.vapi.cli("show errors"))
+ self.logger.info(self.vapi.cli("show int address"))
+
+ capture = self.pg1.get_capture(len(pkts))
+
+ for pkt in capture:
+ self.logger.info(pkt.show2(dump=True))
+ self.logger.info("GTP4.D Address={}".format(
+ str(pkt[IPv6ExtHdrSegmentRouting].addresses[0])))
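+            # the translated SID embeds the original IPv4 DA 1.1.1.1
+            # (0101:0101) and the GTP-U TEID 200 (0xc8) behind the
+            # D4::/32 prefix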
+ self.assertEqual(
+ str(pkt[IPv6ExtHdrSegmentRouting].addresses[0]),
+ "d4:0:101:101::c800:0")
+
+
+class TestSRv6EndMGTP6E(VppTestCase):
+ """ SRv6 End.M.GTP6.E """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6EndMGTP6E, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.pg_if_i = cls.pg_interfaces[0]
+ cls.pg_if_o = cls.pg_interfaces[1]
+
+ cls.pg_if_i.config_ip6()
+ cls.pg_if_o.config_ip6()
+
+ cls.ip6_nhop = cls.pg_if_o.remote_ip6
+
+ for pg_if in cls.pg_interfaces:
+ pg_if.admin_up()
+ pg_if.resolve_ndp(timeout=5)
+
+ except Exception:
+ super(TestSRv6EndMGTP6E, cls).tearDownClass()
+ raise
+
+ def create_packets(self, inner):
+ # 64bit prefix + 8bit QFI + 32bit TEID + 24bit
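+        # i.e. SID aaaa:aaaa:aaaa:aaaa:bb:bbbb:bb00:0
+        # (QFI 0, TEID 0xbbbbbbbb)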
+ dst = b'\xaa' * 8 + b'\x00' + \
+ b'\xbb' * 4 + b'\x00' * 3
+ ip6_dst = IPv6Address(dst)
+
+ self.ip6_dst = ip6_dst
+
+ src = b'\xcc' * 8 + \
+ b'\xdd' * 4 + b'\x11' * 4
+ ip6_src = IPv6Address(src)
+
+ self.ip6_src = ip6_src
+
+ pkts = list()
+ for d, s in inner:
+ pkt = (Ether() /
+ IPv6(dst=str(ip6_dst),
+ src=str(ip6_src)) /
+ IPv6ExtHdrSegmentRouting(segleft=1,
+ lastentry=0,
+ tag=0,
+ addresses=["a1::1"]) /
+ IPv6(dst=d, src=s) / UDP(sport=1000, dport=23))
+ self.logger.info(pkt.show2(dump=True))
+ pkts.append(pkt)
+
+ return pkts
+
+ def test_srv6_mobile(self):
+ """ test_srv6_mobile """
+ pkts = self.create_packets([("A::1", "B::1"), ("C::1", "D::1")])
+
+ self.vapi.cli(
+ "sr localsid prefix {}/64 behavior end.m.gtp6.e"
+ .format(pkts[0]['IPv6'].dst))
+ self.vapi.cli(
+ "ip route add a1::/64 via {}".format(self.ip6_nhop))
+ self.logger.info(self.vapi.cli("show sr localsids"))
+
+ self.vapi.cli("clear errors")
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.logger.info(self.vapi.cli("show errors"))
+ self.logger.info(self.vapi.cli("show int address"))
+
+ capture = self.pg1.get_capture(len(pkts))
+
+ for pkt in capture:
+ self.logger.info(pkt.show2(dump=True))
+ self.assertEqual(pkt[IPv6].dst, "a1::1")
+ self.assertEqual(pkt[IPv6].src, str(self.ip6_src))
+ self.assertEqual(pkt[GTP_U_Header].teid, 0xbbbbbbbb)
+
+
+class TestSRv6EndMGTP6D(VppTestCase):
+ """ SRv6 End.M.GTP6.D """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSRv6EndMGTP6D, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.pg_if_i = cls.pg_interfaces[0]
+ cls.pg_if_o = cls.pg_interfaces[1]
+
+ cls.pg_if_i.config_ip6()
+ cls.pg_if_o.config_ip6()
+
+ cls.ip6_nhop = cls.pg_if_o.remote_ip6
+
+ cls.ip6_dst = "2001::1"
+ cls.ip6_src = "2002::1"
+
+ for pg_if in cls.pg_interfaces:
+ pg_if.admin_up()
+ pg_if.resolve_ndp(timeout=5)
+
+ except Exception:
+ super(TestSRv6EndMGTP6D, cls).tearDownClass()
+ raise
+
+ def create_packets(self, inner):
+
+ ip6_dst = IPv6Address(str(self.ip6_dst))
+
+ ip6_src = IPv6Address(str(self.ip6_src))
+
+ self.logger.info("ip6 dst: {}".format(ip6_dst))
+ self.logger.info("ip6 src: {}".format(ip6_src))
+
+ pkts = list()
+ for d, s in inner:
+ pkt = (Ether() /
+ IPv6(dst=str(ip6_dst), src=str(ip6_src)) /
+ UDP(sport=2152, dport=2152) /
+ GTP_U_Header(gtp_type="g_pdu", teid=200) /
+ IPv6(dst=d, src=s) /
+ UDP(sport=1000, dport=23))
+ self.logger.info(pkt.show2(dump=True))
+ pkts.append(pkt)
+
+ return pkts
+
+ def test_srv6_mobile(self):
+ """ test_srv6_mobile """
+ pkts = self.create_packets([("A::1", "B::1"), ("C::1", "D::1")])
+
+ self.vapi.cli("set sr encaps source addr A1::1")
+ self.vapi.cli("sr policy add bsid D4:: next D2:: next D3::")
+ self.vapi.cli(
+ "sr localsid prefix 2001::/64 behavior end.m.gtp6.d D4::/64")
+ self.vapi.cli("ip route add D2::/64 via {}".format(self.ip6_nhop))
+
+ self.logger.info(self.vapi.cli("show sr policies"))
+
+ self.vapi.cli("clear errors")
+
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.logger.info(self.vapi.cli("show errors"))
+ self.logger.info(self.vapi.cli("show int address"))
+
+ capture = self.pg1.get_capture(len(pkts))
+
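+ # End.M.GTP6.D should map the GTP-U packet into SRv6; the first SID is
+ # expected to carry the TEID (200 == 0xc8) within the configured
+ # D4::/64 prefix, as checked below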
+ for pkt in capture:
+ self.logger.info(pkt.show2(dump=True))
+ self.logger.info("GTP6.D Address={}".format(
+ str(pkt[IPv6ExtHdrSegmentRouting].addresses[0])))
+ self.assertEqual(
+ str(pkt[IPv6ExtHdrSegmentRouting].addresses[0]), "d4::c800:0")
diff --git a/test/test_svs.py b/test/test_svs.py
new file mode 100644
index 00000000000..db4ad8078e0
--- /dev/null
+++ b/test/test_svs.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import VppIpTable
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6
+
+from vpp_papi import VppEnum
+
+NUM_PKTS = 67
+
+
+class TestSVS(VppTestCase):
+ """ SVS Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSVS, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestSVS, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestSVS, self).setUp()
+
+ # create 4 pg interfaces: pg0 stays in the default table, while
+ # pg1-pg3 are each placed in their own IP4/IP6 table (1-3)
+ self.create_pg_interfaces(range(4))
+
+ table_id = 0
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+
+ if table_id != 0:
+ tbl = VppIpTable(self, table_id)
+ tbl.add_vpp_config()
+ tbl = VppIpTable(self, table_id, is_ip6=1)
+ tbl.add_vpp_config()
+
+ i.set_table_ip4(table_id)
+ i.set_table_ip6(table_id)
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+ table_id += 1
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.set_table_ip4(0)
+ i.set_table_ip6(0)
+ i.admin_down()
+ super(TestSVS, self).tearDown()
+
+ def test_svs4(self):
+ """ Source VRF Select IP4 """
+
+ #
+ # packets destined out of the 3 non-default table interfaces
+ #
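+ # once SVS is configured, source 1.1.1.1 selects table 1 (out pg1),
+ # 2.2.2.2 selects table 2 (out pg2) and 3.3.3.3 selects table 3
+ # (out pg3)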
+ pkts_0 = [(Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src="1.1.1.1", dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src="2.2.2.2", dst=self.pg2.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src="3.3.3.3", dst=self.pg3.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ pkts_1 = [(Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src="1.1.1.1", dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src="2.2.2.2", dst=self.pg2.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src="3.3.3.3", dst=self.pg3.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ #
+ # before adding the SVS config all these packets are dropped when
+ # ingressing on pg0 since pg0 is in the default table
+ #
+ for p in pkts_0:
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # Add table 1001 & 1002 into which we'll add the routes
+ # determining the source VRF selection
+ #
+ table_ids = [101, 102]
+
+ for table_id in table_ids:
+ self.vapi.svs_table_add_del(
+ is_add=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_id)
+
+ #
+ # map X.0.0.0/8 to each SVS table for lookup in table X
+ #
+ for i in range(1, 4):
+ self.vapi.svs_route_add_del(
+ is_add=1,
+ prefix="%d.0.0.0/8" % i,
+ table_id=table_id,
+ source_table_id=i)
+
+ #
+ # Enable SVS on pg0/pg1 using tables 101/102
+ #
+ self.vapi.svs_enable_disable(
+ is_enable=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_ids[0],
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.svs_enable_disable(
+ is_enable=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_ids[1],
+ sw_if_index=self.pg1.sw_if_index)
+
+ #
+ # now all the packets should be delivered out the respective interface
+ #
+ self.send_and_expect(self.pg0, pkts_0[0] * NUM_PKTS, self.pg1)
+ self.send_and_expect(self.pg0, pkts_0[1] * NUM_PKTS, self.pg2)
+ self.send_and_expect(self.pg0, pkts_0[2] * NUM_PKTS, self.pg3)
+ self.send_and_expect(self.pg1, pkts_1[0] * NUM_PKTS, self.pg1)
+ self.send_and_expect(self.pg1, pkts_1[1] * NUM_PKTS, self.pg2)
+ self.send_and_expect(self.pg1, pkts_1[2] * NUM_PKTS, self.pg3)
+
+ #
+ # check that if the SVS lookup does not match a route the packet
+ # is forwarded using the interface's routing table
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IP(src=self.pg1.remote_ip4, dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg1, p * NUM_PKTS, self.pg1)
+
+ #
+ # dump the SVS configs
+ #
+ ss = self.vapi.svs_dump()
+
+ self.assertEqual(ss[0].table_id, table_ids[0])
+ self.assertEqual(ss[0].sw_if_index, self.pg0.sw_if_index)
+ self.assertEqual(ss[0].af, VppEnum.vl_api_address_family_t.ADDRESS_IP4)
+ self.assertEqual(ss[1].table_id, table_ids[1])
+ self.assertEqual(ss[1].sw_if_index, self.pg1.sw_if_index)
+ self.assertEqual(ss[1].af, VppEnum.vl_api_address_family_t.ADDRESS_IP4)
+
+ #
+ # cleanup
+ #
+ self.vapi.svs_enable_disable(
+ is_enable=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_ids[0],
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.svs_enable_disable(
+ is_enable=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_ids[1],
+ sw_if_index=self.pg1.sw_if_index)
+
+ for table_id in table_ids:
+ for i in range(1, 4):
+ self.vapi.svs_route_add_del(
+ is_add=0,
+ prefix="%d.0.0.0/8" % i,
+ table_id=table_id,
+ source_table_id=0)
+
+ self.vapi.svs_table_add_del(
+ is_add=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP4,
+ table_id=table_id)
+
+ def test_svs6(self):
+ """ Source VRF Select IP6 """
+
+ #
+ # packets destined out of the 3 non-default table interfaces
+ #
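+ # once SVS is configured, source 2001:1::1 selects table 1 (out pg1),
+ # 2001:2::1 selects table 2 (out pg2) and 2001:3::1 selects table 3
+ # (out pg3)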
+ pkts_0 = [(Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src="2001:1::1", dst=self.pg1.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src="2001:2::1", dst=self.pg2.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src="2001:3::1", dst=self.pg3.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+ pkts_1 = [(Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src="2001:1::1", dst=self.pg1.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src="2001:2::1", dst=self.pg2.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)),
+ (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src="2001:3::1", dst=self.pg3.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))]
+
+ #
+ # before adding the SVS config all these packets are dropped when
+ # ingressing on pg0 since pg0 is in the default table
+ #
+ for p in pkts_0:
+ self.send_and_assert_no_replies(self.pg0, p * 1)
+
+ #
+ # Add table 1001 & 1002 into which we'll add the routes
+ # determining the source VRF selection
+ #
+ table_ids = [101, 102]
+
+ for table_id in table_ids:
+ self.vapi.svs_table_add_del(
+ is_add=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_id)
+
+ #
+ # map 2001:X::/32 to each SVS table for lookup in table X
+ #
+ for i in range(1, 4):
+ self.vapi.svs_route_add_del(
+ is_add=1,
+ prefix="2001:%d::/32" % i,
+ table_id=table_id,
+ source_table_id=i)
+
+ #
+ # Enable SVS on pg0/pg1 using tables 101/102
+ #
+ self.vapi.svs_enable_disable(
+ is_enable=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_ids[0],
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.svs_enable_disable(
+ is_enable=1,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_ids[1],
+ sw_if_index=self.pg1.sw_if_index)
+
+ #
+ # now all the packets should be delivered out the respective interface
+ #
+ self.send_and_expect(self.pg0, pkts_0[0] * NUM_PKTS, self.pg1)
+ self.send_and_expect(self.pg0, pkts_0[1] * NUM_PKTS, self.pg2)
+ self.send_and_expect(self.pg0, pkts_0[2] * NUM_PKTS, self.pg3)
+ self.send_and_expect(self.pg1, pkts_1[0] * NUM_PKTS, self.pg1)
+ self.send_and_expect(self.pg1, pkts_1[1] * NUM_PKTS, self.pg2)
+ self.send_and_expect(self.pg1, pkts_1[2] * NUM_PKTS, self.pg3)
+
+ #
+ # check that if the SVS lookup does not match a route the packet
+ # is forwarded using the interface's routing table
+ #
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg0, p * NUM_PKTS, self.pg0)
+
+ p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6, dst=self.pg1.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100))
+ self.send_and_expect(self.pg1, p * NUM_PKTS, self.pg1)
+
+ #
+ # dump the SVS configs
+ #
+ ss = self.vapi.svs_dump()
+
+ self.assertEqual(ss[0].table_id, table_ids[0])
+ self.assertEqual(ss[0].sw_if_index, self.pg0.sw_if_index)
+ self.assertEqual(ss[0].af, VppEnum.vl_api_address_family_t.ADDRESS_IP6)
+ self.assertEqual(ss[1].table_id, table_ids[1])
+ self.assertEqual(ss[1].sw_if_index, self.pg1.sw_if_index)
+ self.assertEqual(ss[1].af, VppEnum.vl_api_address_family_t.ADDRESS_IP6)
+
+ #
+ # cleanup
+ #
+ self.vapi.svs_enable_disable(
+ is_enable=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_ids[0],
+ sw_if_index=self.pg0.sw_if_index)
+ self.vapi.svs_enable_disable(
+ is_enable=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_ids[1],
+ sw_if_index=self.pg1.sw_if_index)
+
+ for table_id in table_ids:
+ for i in range(1, 4):
+ self.vapi.svs_route_add_del(
+ is_add=0,
+ prefix="2001:%d::/32" % i,
+ table_id=table_id,
+ source_table_id=0)
+
+ self.vapi.svs_table_add_del(
+ is_add=0,
+ af=VppEnum.vl_api_address_family_t.ADDRESS_IP6,
+ table_id=table_id)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_urpf.py b/test/test_urpf.py
new file mode 100644
index 00000000000..8f4e563f8bc
--- /dev/null
+++ b/test/test_urpf.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6
+
+from vpp_papi import VppEnum
+
+N_PKTS = 63
+
+
+class TestURPF(VppTestCase):
+ """ Unicast Reverse Path Forwarding Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestURPF, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestURPF, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestURPF, self).setUp()
+
+ # create 4 pg interfaces so there are a few addresses
+ # in the FIB
+ self.create_pg_interfaces(range(4))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+
+ def tearDown(self):
+ for i in self.pg_interfaces:
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+ i.admin_down()
+ super(TestURPF, self).tearDown()
+
+ def test_urpf4(self):
+ """ uRPF IP4 """
+
+ e = VppEnum
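+ # three classes of packet ingressing pg0:
+ # - p_spoof_loose: source has no route at all -> dropped by both
+ # loose and strict checks
+ # - p_spoof_strict: source is routed, but via pg2 rather than pg0
+ # -> dropped by the strict check only
+ # - p_good: source is pg0's own peer -> always forwarded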
+ p_spoof_loose = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src="3.3.3.3", dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+ p_spoof_strict = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src=self.pg2.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+ p_good = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+
+ #
+ # before adding the uRPF, ensure all packets are forwarded
+ #
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
+
+ #
+ # apply loose uRPF check on pg0 rx
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg0.sw_if_index)
+
+ # good packets still pass
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # packets from an address for which there is a route are forwarded
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ # packets from an address for which there is no route are dropped
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip4-rx-urpf-loose/uRPF Drop",
+ N_PKTS)
+
+ #
+ # crank it up to strict mode
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg0.sw_if_index)
+
+ # good packets still pass
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # packets that would not be routed back thru pg0 are dropped
+ self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip4-rx-urpf-strict/uRPF Drop",
+ 2 * N_PKTS)
+
+ #
+ # disable uRPF, all traffic should pass
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg0.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
+
+ #
+ # Now apply in the TX direction
+ # for loose it is the same deal, they should not be forwarded
+ # if there's no route
+ # for strict they should not be forwarded if they would be
+ # forwarded thru that interface.
+ #
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip4-tx-urpf-loose/uRPF Drop",
+ N_PKTS)
+
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # the strict-spoofed packet, sourced from a peer on pg2, is allowed
+ # since it is not forwarded via pg1
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip4-tx-urpf-strict/uRPF Drop",
+ N_PKTS)
+
+ # change the strict packet so that it would forward through pg1
+ p_spoof_strict = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst=self.pg1.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+
+ self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
+ self.assert_error_counter_equal("/err/ip4-tx-urpf-strict/uRPF Drop",
+ 2 * N_PKTS)
+
+ # cleanup
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
+ af=e.vl_api_address_family_t.ADDRESS_IP4,
+ sw_if_index=self.pg1.sw_if_index)
+
+ def test_urpf6(self):
+ """ uRPF IP6 """
+
+ e = VppEnum
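+ # three classes of packet ingressing pg0:
+ # - p_spoof_loose: source has no route at all -> dropped by both
+ # loose and strict checks
+ # - p_spoof_strict: source is routed, but via pg2 rather than pg0
+ # -> dropped by the strict check only
+ # - p_good: source is pg0's own peer -> always forwarded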
+ p_spoof_loose = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IPv6(src="3::3", dst=self.pg1.remote_ip6) /
+ UDP(sport=1236, dport=1236) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+ p_spoof_strict = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IPv6(src=self.pg2.remote_ip6,
+ dst=self.pg1.remote_ip6) /
+ UDP(sport=1236, dport=1236) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+ p_good = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6,
+ dst=self.pg1.remote_ip6) /
+ UDP(sport=1236, dport=1236) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+
+ #
+ # before adding the uRPF, ensure all packets are forwarded
+ #
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
+
+ #
+ # apply loose uRPF check on pg0 rx
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg0.sw_if_index)
+
+ # good packets still pass
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # packets from an address for which there is a route are forwarded
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ # packets from an address for which there is no route are dropped
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip6-rx-urpf-loose/uRPF Drop",
+ N_PKTS)
+
+ #
+ # crank it up to strict mode
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg0.sw_if_index)
+
+ # good packets still pass
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # packets that would not be routed back thru pg0 are dropped
+ self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip6-rx-urpf-strict/uRPF Drop",
+ 2 * N_PKTS)
+
+ #
+ # disable uRPF, all traffic should pass
+ #
+ self.vapi.urpf_update(is_input=True,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg0.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_loose, self.pg1)
+
+ #
+ # Now apply in the TX direction
+ # for loose it is the same deal, they should not be forwarded
+ # if there's no route
+ # for strict they should not be forwarded if they would be
+ # forwarded thru that interface.
+ #
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_LOOSE,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip6-tx-urpf-loose/uRPF Drop",
+ N_PKTS)
+
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_STRICT,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg1.sw_if_index)
+
+ self.send_and_expect(self.pg0, p_good, self.pg1)
+ # the strict-spoofed packet, sourced from a peer on pg2, is allowed
+ # since it is not forwarded via pg1
+ self.send_and_expect(self.pg0, p_spoof_strict, self.pg1)
+ self.send_and_assert_no_replies(self.pg0, p_spoof_loose)
+
+ self.assert_error_counter_equal("/err/ip6-tx-urpf-strict/uRPF Drop",
+ N_PKTS)
+
+ # change the strict packet so that it would forward through pg1
+ p_spoof_strict = (Ether(dst=self.pg0.local_mac,
+ src=self.pg0.remote_mac) /
+ IPv6(src=self.pg1.remote_ip6,
+ dst=self.pg1.remote_ip6) /
+ UDP(sport=1236, dport=1236) /
+ Raw(b'\xa5' * 100)) * N_PKTS
+
+ self.send_and_assert_no_replies(self.pg0, p_spoof_strict)
+ self.assert_error_counter_equal("/err/ip6-tx-urpf-strict/uRPF Drop",
+ 2 * N_PKTS)
+
+ # cleanup
+ self.vapi.urpf_update(is_input=False,
+ mode=e.vl_api_urpf_mode_t.URPF_API_MODE_OFF,
+ af=e.vl_api_address_family_t.ADDRESS_IP6,
+ sw_if_index=self.pg1.sw_if_index)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vapi.py b/test/test_vapi.py
new file mode 100644
index 00000000000..d91099210d2
--- /dev/null
+++ b/test/test_vapi.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+""" VAPI test """
+
+import unittest
+import os
+import signal
+from framework import VppTestCase, VppTestRunner, Worker
+
+
+class VAPITestCase(VppTestCase):
+ """ VAPI test """
+
+ @classmethod
+ def setUpClass(cls):
+ super(VAPITestCase, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(VAPITestCase, cls).tearDownClass()
+
+ def test_vapi_c(self):
+ """ run C VAPI tests """
+ var = "TEST_BR"
+ built_root = os.getenv(var, None)
+ self.assertIsNotNone(built_root,
+ "Environment variable `%s' not set" % var)
+ executable = "%s/vapi_test/vapi_c_test" % built_root
+ worker = Worker([executable, "vapi client",
+ self.get_api_segment_prefix()], self.logger)
+ worker.start()
+ timeout = 60
+ worker.join(timeout)
+ self.logger.info("Worker result is `%s'" % worker.result)
+ error = False
+ if worker.result is None:
+ try:
+ error = True
+ self.logger.error(
+ "Timeout! Worker did not finish in %ss" % timeout)
+ os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
+ worker.join()
+ except:
+ self.logger.debug("Couldn't kill worker-spawned process")
+ raise
+ if error:
+ raise Exception(
+ "Timeout! Worker did not finish in %ss" % timeout)
+ self.assert_equal(worker.result, 0, "Binary test return code")
+
+ def test_vapi_cpp(self):
+ """ run C++ VAPI tests """
+ var = "TEST_BR"
+ built_root = os.getenv(var, None)
+ self.assertIsNotNone(built_root,
+ "Environment variable `%s' not set" % var)
+ executable = "%s/vapi_test/vapi_cpp_test" % built_root
+ worker = Worker([executable, "vapi client",
+ self.get_api_segment_prefix()], self.logger)
+ worker.start()
+ timeout = 120
+ worker.join(timeout)
+ self.logger.info("Worker result is `%s'" % worker.result)
+ error = False
+ if worker.result is None:
+ try:
+ error = True
+ self.logger.error(
+ "Timeout! Worker did not finish in %ss" % timeout)
+ os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
+ worker.join()
+ except:
+ raise Exception("Couldn't kill worker-spawned process")
+ if error:
+ raise Exception(
+ "Timeout! Worker did not finish in %ss" % timeout)
+ self.assert_equal(worker.result, 0, "Binary test return code")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vpe_api.py b/test/test_vpe_api.py
new file mode 100644
index 00000000000..54f7e41151b
--- /dev/null
+++ b/test/test_vpe_api.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2019. Vinci Consulting Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import datetime
+import time
+import unittest
+from framework import VppTestCase
+
+enable_print = False
+
+
+class TestVpeApi(VppTestCase):
+ """TestVpeApi"""
+
+ def test_log_dump_default(self):
+ rv = self.vapi.cli('test log notice fib entry this is a test')
+ rv = self.vapi.log_dump()
+ if enable_print:
+ print('\n'.join([str(v) for v in rv]))
+ self.assertTrue(rv)
+
+ def test_log_dump_timestamp_0(self):
+ rv = self.vapi.cli('test log notice fib entry this is a test')
+ rv = self.vapi.log_dump(start_timestamp=0.0)
+ if enable_print:
+ print('\n'.join([str(v) for v in rv]))
+ self.assertTrue(rv)
+
+ def test_log_dump_timestamp_future(self):
+ rv = self.vapi.cli('test log debug fib entry test')
+ rv = self.vapi.log_dump(start_timestamp=time.time() + 60.0)
+ if enable_print:
+ print('\n'.join([str(v) for v in rv]))
+ self.assertFalse(rv)
+
+ def test_show_vpe_system_time(self):
+ local_start_time = datetime.datetime.now()
+ rv = self.vapi.show_vpe_system_time()
+ self.assertTrue(rv.vpe_system_time > local_start_time -
+ datetime.timedelta(hours=1.0),
+ 'system times differ by more than an hour.')
+ if enable_print:
+ print('\n'.join([str(v) for v in rv]))
+ print('%r %s' % (rv.vpe_system_time,
+ rv.vpe_system_time))
diff --git a/test/test_vppinfra.py b/test/test_vppinfra.py
new file mode 100644
index 00000000000..8b6ec965fea
--- /dev/null
+++ b/test/test_vppinfra.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from framework import running_gcov_tests
+
+
+class TestVppinfra(VppTestCase):
+ """ Vppinfra Unit Test Cases """
+ vpp_worker_count = 1
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestVppinfra, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVppinfra, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestVppinfra, self).setUp()
+
+ def tearDown(self):
+ super(TestVppinfra, self).tearDown()
+
+ def test_bitmap_unittest(self):
+ """ Bitmap Code Coverage Test """
+ cmds = ["test bitmap"]
+
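+ # run the in-binary bitmap unit test via the CLI; failures are only
+ # logged here, the intent is to exercise the bitmap code for coverage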
+ for cmd in cmds:
+ r = self.vapi.cli_return_response(cmd)
+ if r.retval != 0:
+ if hasattr(r, 'reply'):
+ self.logger.info(cmd + " FAIL reply " + r.reply)
+ else:
+ self.logger.info(cmd + " FAIL retval " + str(r.retval))
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vrrp.py b/test/test_vrrp.py
new file mode 100644
index 00000000000..cc70613dfb5
--- /dev/null
+++ b/test/test_vrrp.py
@@ -0,0 +1,1293 @@
+#!/usr/bin/env python3
+
+#
+# Copyright 2019-2020 Rubicon Communications, LLC (Netgate)
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import unittest
+import time
+import socket
+from socket import inet_pton, inet_ntop
+
+from vpp_object import VppObject
+from vpp_papi import VppEnum
+
+from scapy.packet import raw
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet import IP, ICMP, icmptypes
+from scapy.layers.inet6 import IPv6, ipv6nh, IPv6ExtHdrHopByHop, \
+ ICMPv6MLReport2, ICMPv6ND_NA, ICMPv6ND_NS, ICMPv6NDOptDstLLAddr, \
+ ICMPv6NDOptSrcLLAddr, ICMPv6EchoRequest, ICMPv6EchoReply
+from scapy.contrib.igmpv3 import IGMPv3, IGMPv3mr, IGMPv3gr
+from scapy.layers.vrrp import IPPROTO_VRRP, VRRPv3
+from scapy.utils6 import in6_getnsma, in6_getnsmac
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from util import ip6_normalize
+
+VRRP_VR_FLAG_PREEMPT = 1
+VRRP_VR_FLAG_ACCEPT = 2
+VRRP_VR_FLAG_UNICAST = 4
+VRRP_VR_FLAG_IPV6 = 8
+
+VRRP_VR_STATE_INIT = 0
+VRRP_VR_STATE_BACKUP = 1
+VRRP_VR_STATE_MASTER = 2
+VRRP_VR_STATE_INTF_DOWN = 3
+
+
+def is_non_arp(p):
+ """ Want to filter out advertisements, igmp, etc"""
+ if p.haslayer(ARP):
+ return False
+
+ return True
+
+
+def is_not_adv(p):
+ """ Filter out everything but advertisements. E.g. multicast RD/ND """
+ if p.haslayer(VRRPv3):
+ return False
+
+ return True
+
+
+def is_not_echo_reply(p):
+ """ filter out advertisements and other while waiting for echo reply """
+ if p.haslayer(IP) and p.haslayer(ICMP):
+ if icmptypes[p[ICMP].type] == "echo-reply":
+ return False
+ elif p.haslayer(IPv6) and p.haslayer(ICMPv6EchoReply):
+ return False
+
+ return True
+
+
+class VppVRRPVirtualRouter(VppObject):
+
+ def __init__(self,
+ test,
+ intf,
+ vr_id,
+ prio=100,
+ intvl=100,
+ flags=VRRP_VR_FLAG_PREEMPT,
+ vips=None):
+ self._test = test
+ self._intf = intf
+ self._sw_if_index = self._intf.sw_if_index
+ self._vr_id = vr_id
+ self._prio = prio
+ self._intvl = intvl
+ self._flags = flags
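+ # IPv4 and IPv6 VRRP use different well-known advertisement
+ # destinations and virtual MAC prefixes (RFC 5798:
+ # 00:00:5e:00:01:VRID for IPv4, 00:00:5e:00:02:VRID for IPv6)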
+ if (flags & VRRP_VR_FLAG_IPV6):
+ self._is_ipv6 = 1
+ self._adv_dest_mac = "33:33:00:00:00:12"
+ self._virtual_mac = "00:00:5e:00:02:%02x" % vr_id
+ self._adv_dest_ip = "ff02::12"
+ self._vips = ([intf.local_ip6] if vips is None else vips)
+ else:
+ self._is_ipv6 = 0
+ self._adv_dest_mac = "01:00:5e:00:00:12"
+ self._virtual_mac = "00:00:5e:00:01:%02x" % vr_id
+ self._adv_dest_ip = "224.0.0.18"
+ self._vips = ([intf.local_ip4] if vips is None else vips)
+ self._tracked_ifs = []
+
+ def add_vpp_config(self):
+ self._test.vapi.vrrp_vr_add_del(is_add=1,
+ sw_if_index=self._intf.sw_if_index,
+ vr_id=self._vr_id,
+ priority=self._prio,
+ interval=self._intvl,
+ flags=self._flags,
+ n_addrs=len(self._vips),
+ addrs=self._vips)
+
+ def query_vpp_config(self):
+ vrs = self._test.vapi.vrrp_vr_dump(sw_if_index=self._intf.sw_if_index)
+ for vr in vrs:
+ if vr.config.vr_id != self._vr_id:
+ continue
+
+ is_ipv6 = (1 if (vr.config.flags & VRRP_VR_FLAG_IPV6) else 0)
+ if is_ipv6 != self._is_ipv6:
+ continue
+
+ return vr
+
+ return None
+
+ def remove_vpp_config(self):
+ self._test.vapi.vrrp_vr_add_del(is_add=0,
+ sw_if_index=self._intf.sw_if_index,
+ vr_id=self._vr_id,
+ priority=self._prio,
+ interval=self._intvl,
+ flags=self._flags,
+ n_addrs=len(self._vips),
+ addrs=self._vips)
+
+ def start_stop(self, is_start):
+ self._test.vapi.vrrp_vr_start_stop(is_start=is_start,
+ sw_if_index=self._intf.sw_if_index,
+ vr_id=self._vr_id,
+ is_ipv6=self._is_ipv6)
+ self._start_time = (time.time() if is_start else None)
+
+ def add_del_tracked_interface(self, is_add, sw_if_index, prio):
+ args = {
+ 'sw_if_index': self._intf.sw_if_index,
+ 'is_ipv6': self._is_ipv6,
+ 'vr_id': self._vr_id,
+ 'is_add': is_add,
+ 'n_ifs': 1,
+ 'ifs': [{'sw_if_index': sw_if_index, 'priority': prio}]
+ }
+ self._test.vapi.vrrp_vr_track_if_add_del(**args)
+ self._tracked_ifs.append(args['ifs'][0])
+
+ def set_unicast_peers(self, addrs):
+ args = {
+ 'sw_if_index': self._intf.sw_if_index,
+ 'is_ipv6': self._is_ipv6,
+ 'vr_id': self._vr_id,
+ 'n_addrs': len(addrs),
+ 'addrs': addrs
+ }
+ self._test.vapi.vrrp_vr_set_peers(**args)
+ self._unicast_peers = addrs
+
+ def start_time(self):
+ return self._start_time
+
+ def virtual_mac(self):
+ return self._virtual_mac
+
+ def virtual_ips(self):
+ return self._vips
+
+ def adv_dest_mac(self):
+ return self._adv_dest_mac
+
+ def adv_dest_ip(self):
+ return self._adv_dest_ip
+
+ def priority(self):
+ return self._prio
+
+ def vr_id(self):
+ return self._vr_id
+
+ def adv_interval(self):
+ return self._intvl
+
+ def interface(self):
+ return self._intf
+
+ def assert_state_equals(self, state):
+ vr_details = self.query_vpp_config()
+ self._test.assertEqual(vr_details.runtime.state, state)
+
+ def master_down_seconds(self):
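+ # master_down_int is reported in the same centisecond units as the
+ # advertisement interval, hence the 0.01 conversion to seconds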
+ vr_details = self.query_vpp_config()
+ return (vr_details.runtime.master_down_int * 0.01)
+
+
+class VrrpCommonMixin:
+ def vrrp_adv_packet(self, vr, prio=None, src_ip=None):
+ # Build the advertisement the given VR is expected to send. The
+ # per-VR parameters (VR id, priority, VIPs, interval, MACs) are
+ # taken from the VppVRRPVirtualRouter passed in, since the test
+ # case itself does not carry them.
+ dst_ip = vr.adv_dest_ip()
+ if prio is None:
+ prio = vr.priority()
+ vips = vr.virtual_ips()
+ eth = Ether(dst=vr.adv_dest_mac(), src=vr.virtual_mac())
+ vrrp = VRRPv3(vrid=vr.vr_id(), priority=prio,
+ ipcount=len(vips), adv=vr.adv_interval())
+ if vr._is_ipv6:
+ src_ip = (vr.interface().local_ip6_ll if src_ip is None else src_ip)
+ ip = IPv6(src=src_ip, dst=dst_ip, nh=IPPROTO_VRRP, hlim=255)
+ else:
+ src_ip = (vr.interface().local_ip4 if src_ip is None else src_ip)
+ ip = IP(src=src_ip, dst=dst_ip, proto=IPPROTO_VRRP, ttl=255, id=0)
+ vrrp.addrlist = vips
+
+ # Fill in default values & checksums
+ pkt = Ether(raw(eth / ip / vrrp))
+ return pkt
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class TestVRRP4(VrrpCommonMixin, VppTestCase):
+ """ IPv4 VRRP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestVRRP4, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVRRP4, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestVRRP4, self).setUp()
+
+ self.create_pg_interfaces(range(2))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.generate_remote_hosts(5)
+ i.configure_ipv4_neighbors()
+
+ self._vrs = []
+ self._default_flags = VRRP_VR_FLAG_PREEMPT
+ self._default_adv = 100
+
+ def tearDown(self):
+ for vr in self._vrs:
+ try:
+ vr_api = vr.query_vpp_config()
+ if vr_api.runtime.state != VRRP_VR_STATE_INIT:
+ vr.start_stop(is_start=0)
+ vr.remove_vpp_config()
+ except:
+ self.logger.error("Error cleaning up")
+
+ for i in self.pg_interfaces:
+ i.admin_down()
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+
+ self._vrs = []
+
+ super(TestVRRP4, self).tearDown()
+
+ def verify_vrrp4_igmp(self, pkt):
+ ip = pkt[IP]
+ self.assertEqual(ip.dst, "224.0.0.22")
+ self.assertEqual(ip.proto, 2)
+
+ igmp = pkt[IGMPv3]
+ self.assertEqual(IGMPv3.igmpv3types[igmp.type],
+ "Version 3 Membership Report")
+
+ igmpmr = pkt[IGMPv3mr]
+ self.assertEqual(igmpmr.numgrp, 1)
+ self.assertEqual(igmpmr.records[0].maddr, "224.0.0.18")
+
+ def verify_vrrp4_garp(self, pkt, vip, vmac):
+ arp = pkt[ARP]
+
+ # ARP "who-has" op == 1
+ self.assertEqual(arp.op, 1)
+ self.assertEqual(arp.pdst, arp.psrc)
+ self.assertEqual(arp.pdst, vip)
+ self.assertEqual(arp.hwsrc, vmac)
+
+ def verify_vrrp4_adv(self, rx_pkt, vr, prio=None):
+ vips = vr.virtual_ips()
+ eth = rx_pkt[Ether]
+ ip = rx_pkt[IP]
+ vrrp = rx_pkt[VRRPv3]
+
+ pkt = self.vrrp_adv_packet(vr, prio=prio)
+
+ # Source MAC is virtual MAC, destination is multicast MAC
+ self.assertEqual(eth.src, vr.virtual_mac())
+ self.assertEqual(eth.dst, vr.adv_dest_mac())
+
+ self.assertEqual(ip.dst, "224.0.0.18")
+ self.assertEqual(ip.ttl, 255)
+ self.assertEqual(ip.proto, IPPROTO_VRRP)
+
+ self.assertEqual(vrrp.version, 3)
+ self.assertEqual(vrrp.type, 1)
+ self.assertEqual(vrrp.vrid, vr.vr_id())
+ if prio is None:
+ prio = vr.priority()
+ self.assertEqual(vrrp.priority, prio)
+ self.assertEqual(vrrp.ipcount, len(vips))
+ self.assertEqual(vrrp.adv, vr.adv_interval())
+ self.assertListEqual(vrrp.addrlist, vips)
+
+ # VR with priority 255 owns the virtual address and should
+ # become master and start advertising immediately.
+ def test_vrrp4_master_adv(self):
+ """ IPv4 Master VR advertises """
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ prio = 255
+ intvl = self._default_adv
+ vr = VppVRRPVirtualRouter(self, self.pg0, 100,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags)
+
+ vr.add_vpp_config()
+ vr.start_stop(is_start=1)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+ vr.start_stop(is_start=0)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+
+ pkts = self.pg0.get_capture(4)
+
+ # Init -> Master: IGMP Join, VRRP adv, gratuitous ARP are sent
+ self.verify_vrrp4_igmp(pkts[0])
+ self.verify_vrrp4_adv(pkts[1], vr, prio=prio)
+ self.verify_vrrp4_garp(pkts[2], vr.virtual_ips()[0], vr.virtual_mac())
+ # Master -> Init: Adv with priority 0 sent to force an election
+ self.verify_vrrp4_adv(pkts[3], vr, prio=0)
+
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ # VR with priority < 255 enters backup state and does not advertise as
+ # long as it receives higher priority advertisements
+ def test_vrrp4_backup_noadv(self):
+ """ IPv4 Backup VR does not advertise """
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[self.pg0.remote_ip4])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ vr.start_stop(is_start=1)
+
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ # watch for advertisements for 2x the master down preemption timeout
+ end_time = vr.start_time() + 2 * vr.master_down_seconds()
+
+ # Init -> Backup: An IGMP join should be sent
+ pkts = self.pg0.get_capture(1)
+ self.verify_vrrp4_igmp(pkts[0])
+
+ # send higher prio advertisements, should not receive any
+ src_ip = self.pg0.remote_ip4
+ pkts = [self.vrrp_adv_packet(vr, prio=prio+10, src_ip=src_ip)]
+ while time.time() < end_time:
+ self.send_and_assert_no_replies(self.pg0, pkts, timeout=intvl_s)
+ self.logger.info(self.vapi.cli("show trace"))
+
+ vr.start_stop(is_start=0)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ def test_vrrp4_master_arp(self):
+ """ IPv4 Master VR replies to ARP """
+ self.pg_start()
+
+ # VR virtual IP is the default, which is the pg local IP
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ vr = VppVRRPVirtualRouter(self, self.pg0, 100,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags)
+ self._vrs.append(vr)
+
+ vr.add_vpp_config()
+
+ # before the VR is up, ARP should resolve to interface MAC
+ self.pg0.resolve_arp()
+ self.assertNotEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ # start the VR, ARP should now resolve to virtual MAC
+ vr.start_stop(is_start=1)
+ self.pg0.resolve_arp()
+ self.assertEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ # stop the VR, ARP should resolve to interface MAC again
+ vr.start_stop(is_start=0)
+ self.pg0.resolve_arp()
+ self.assertNotEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ def test_vrrp4_backup_noarp(self):
+ """ IPv4 Backup VR ignores ARP """
+ # We need an address for a virtual IP that is not the IP that
+ # ARP requests will originate from
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_hosts[1].ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ arp_req = (Ether(dst="ff:ff:ff:ff:ff:ff", src=self.pg0.remote_mac) /
+ ARP(op=ARP.who_has, pdst=vip,
+ psrc=self.pg0.remote_ip4, hwsrc=self.pg0.remote_mac))
+
+ # Before the VR is started make sure no reply to request for VIP
+ self.pg_start()
+ self.pg_enable_capture(self.pg_interfaces)
+ self.send_and_assert_no_replies(self.pg0, [arp_req], timeout=1)
+
+ # VR should start in backup state and still should not reply to ARP
+ # send a higher priority adv to make sure it does not become master
+ adv = self.vrrp_adv_packet(vr, prio=prio+10, src_ip=self.pg0.remote_ip4)
+ vr.start_stop(is_start=1)
+ self.send_and_assert_no_replies(self.pg0, [adv, arp_req], timeout=1)
+
+ vr.start_stop(is_start=0)
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ def test_vrrp4_election(self):
+ """ IPv4 Backup VR becomes master if no advertisements received """
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.remote_ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ self.pg_start()
+ vr.start_stop(is_start=1)
+
+ # VR should be in backup state after starting
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ end_time = vr.start_time() + vr.master_down_seconds()
+
+ # should not receive adverts until timer expires & state transition
+ self.pg_enable_capture(self.pg_interfaces)
+ while (time.time() + intvl_s) < end_time:
+ time.sleep(intvl_s)
+ self.pg0.assert_nothing_captured(filter_out_fn=is_not_adv)
+
+ # VR should be in master state, should send an adv
+ self.pg0.enable_capture()
+ self.pg0.wait_for_packet(intvl_s, is_not_adv)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ def test_vrrp4_backup_preempts(self):
+ """ IPv4 Backup VR preempts lower priority master """
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.remote_ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ self.pg_start()
+ vr.start_stop(is_start=1)
+
+ # VR should be in backup state after starting
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ end_time = vr.start_time() + vr.master_down_seconds()
+
+ # send lower prio advertisements until timer expires
+ src_ip = self.pg0.remote_ip4
+ pkts = [self.vrrp_adv_packet(vr, prio=prio-10, src_ip=src_ip)]
+ while time.time() + intvl_s < end_time:
+ self.send_and_assert_no_replies(self.pg0, pkts, timeout=intvl_s)
+ self.logger.info(self.vapi.cli("show trace"))
+
+ # when timer expires, VR should take over as master
+ self.pg0.enable_capture()
+ self.pg0.wait_for_packet(timeout=intvl_s, filter_out_fn=is_not_adv)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ def test_vrrp4_master_preempted(self):
+ """ IPv4 Master VR preempted by higher priority backup """
+
+ # A prio 255 VR cannot be preempted so the prio has to be lower and
+ # we have to wait for it to take over
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ # Build advertisement packet and send it
+ pkts = [self.vrrp_adv_packet(vr, prio=255, src_ip=self.pg0.remote_ip4)]
+ self.pg_send(self.pg0, pkts)
+
+ # VR should be in backup state again
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ def test_vrrp4_accept_mode_disabled(self):
+ """ IPv4 Master VR does not reply for VIP w/ accept mode off """
+
+ # accept mode only matters when prio < 255, so it will have to
+ # come up as a backup and take over as master after the timeout
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_hosts[4].ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ # send an ICMP echo to the VR virtual IP address
+ echo = (Ether(dst=vr.virtual_mac(), src=self.pg0.remote_mac) /
+ IP(dst=vip, src=self.pg0.remote_ip4) /
+ ICMP(seq=1, id=self.pg0.sw_if_index, type='echo-request'))
+ self.pg_send(self.pg0, [echo])
+
+ # wait for an echo reply. none should be received
+ time.sleep(1)
+ self.pg0.assert_nothing_captured(filter_out_fn=is_not_echo_reply)
+
+ def test_vrrp4_accept_mode_enabled(self):
+ """ IPv4 Master VR replies for VIP w/ accept mode on """
+
+ # A prio 255 VR cannot be preempted so the prio has to be lower and
+ # we have to wait for it to take over
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_hosts[4].ip4
+ flags = (VRRP_VR_FLAG_PREEMPT | VRRP_VR_FLAG_ACCEPT)
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ # send an ICMP echo to the VR virtual IP address
+ echo = (Ether(dst=vr.virtual_mac(), src=self.pg0.remote_mac) /
+ IP(dst=vip, src=self.pg0.remote_ip4) /
+ ICMP(seq=1, id=self.pg0.sw_if_index, type='echo-request'))
+ self.pg_send(self.pg0, [echo])
+
+ # wait for an echo reply.
+ time.sleep(1)
+ rx_pkts = self.pg0.get_capture(expected_count=1, timeout=1,
+ filter_out_fn=is_not_echo_reply)
+
+ self.assertEqual(rx_pkts[0][IP].src, vip)
+ self.assertEqual(rx_pkts[0][IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(icmptypes[rx_pkts[0][ICMP].type], "echo-reply")
+ self.assertEqual(rx_pkts[0][ICMP].seq, 1)
+ self.assertEqual(rx_pkts[0][ICMP].id, self.pg0.sw_if_index)
+
+ def test_vrrp4_intf_tracking(self):
+ """ IPv4 Master VR adjusts priority based on tracked interface """
+
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.local_ip4
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # add pg1 as a tracked interface and start the VR
+ adjustment = 50
+ adjusted_prio = prio - adjustment
+ vr.add_del_tracked_interface(is_add=1,
+ sw_if_index=self.pg1.sw_if_index,
+ prio=adjustment)
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ adv_configured = self.vrrp_adv_packet(vr, prio=prio)
+ adv_adjusted = self.vrrp_adv_packet(vr, prio=adjusted_prio)
+
+ # tracked intf is up -> advertised priority == configured priority
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ # take down pg1, verify priority is now being adjusted
+ self.pg1.admin_down()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_adjusted)
+
+ # bring up pg1, verify priority now matches configured value
+ self.pg1.admin_up()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ # remove IP address from pg1, verify priority now being adjusted
+ self.pg1.unconfig_ip4()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_adjusted)
+
+ # add IP address to pg1, verify priority now matches configured value
+ self.pg1.config_ip4()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ def test_vrrp4_master_adv_unicast(self):
+ """ IPv4 Master VR advertises (unicast) """
+
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.local_ip4
+ flags = (self._default_flags | VRRP_VR_FLAG_UNICAST)
+ unicast_peer = self.pg0.remote_hosts[4]
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+ vr.set_unicast_peers([unicast_peer.ip4])
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # Start VR, transition to master
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+
+ self.assertTrue(rx.haslayer(Ether))
+ self.assertTrue(rx.haslayer(IP))
+ self.assertTrue(rx.haslayer(VRRPv3))
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, unicast_peer.mac)
+ self.assertEqual(rx[IP].src, self.pg0.local_ip4)
+ self.assertEqual(rx[IP].dst, unicast_peer.ip4)
+ self.assertEqual(rx[VRRPv3].vrid, vr_id)
+ self.assertEqual(rx[VRRPv3].priority, prio)
+ self.assertEqual(rx[VRRPv3].ipcount, 1)
+ self.assertEqual(rx[VRRPv3].addrlist, [vip])
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class TestVRRP6(VrrpCommonMixin, VppTestCase):
+ """ IPv6 VRRP Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestVRRP6, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVRRP6, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestVRRP6, self).setUp()
+
+ self.create_pg_interfaces(range(2))
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.generate_remote_hosts(5)
+ i.configure_ipv6_neighbors()
+
+ self._vrs = []
+ self._default_flags = (VRRP_VR_FLAG_IPV6 | VRRP_VR_FLAG_PREEMPT)
+ self._default_adv = 100
+
+ def tearDown(self):
+ for vr in self._vrs:
+ try:
+ vr_api = vr.query_vpp_config()
+ if vr_api.runtime.state != VRRP_VR_STATE_INIT:
+ vr.start_stop(is_start=0)
+ vr.remove_vpp_config()
+ except:
+ self.logger.error("Error cleaning up")
+
+ for i in self.pg_interfaces:
+ i.admin_down()
+ i.unconfig_ip4()
+ i.unconfig_ip6()
+
+ self._vrs = []
+
+ super(TestVRRP6, self).tearDown()
+
+ def verify_vrrp6_mlr(self, pkt, vr):
+ ip6 = pkt[IPv6]
+ self.assertEqual(ip6.dst, "ff02::16")
+ self.assertEqual(ipv6nh[ip6.nh], "Hop-by-Hop Option Header")
+
+ hbh = pkt[IPv6ExtHdrHopByHop]
+ self.assertEqual(ipv6nh[hbh.nh], "ICMPv6")
+
+ self.assertTrue(pkt.haslayer(ICMPv6MLReport2))
+ mlr = pkt[ICMPv6MLReport2]
+ # should contain mc addr records for:
+ # - VRRPv3 multicast addr
+ # - solicited node mc addr record for each VR virtual IPv6 address
+ vips = vr.virtual_ips()
+ self.assertEqual(mlr.records_number, len(vips) + 1)
+ self.assertEqual(mlr.records[0].dst, vr.adv_dest_ip())
+
+ def verify_vrrp6_adv(self, rx_pkt, vr, prio=None):
+ self.assertTrue(rx_pkt.haslayer(Ether))
+ self.assertTrue(rx_pkt.haslayer(IPv6))
+ self.assertTrue(rx_pkt.haslayer(VRRPv3))
+
+ # generate a packet for this VR and compare it to the one received
+ pkt = self.vrrp_adv_packet(vr, prio=prio)
+ self.assertTrue(rx_pkt.haslayer(Ether))
+ self.assertTrue(rx_pkt.haslayer(IPv6))
+ self.assertTrue(rx_pkt.haslayer(VRRPv3))
+
+ self.assertEqual(pkt, rx_pkt)
+
+ def verify_vrrp6_gna(self, pkt, vr):
+ self.assertTrue(pkt.haslayer(Ether))
+ self.assertTrue(pkt.haslayer(IPv6))
+ self.assertTrue(pkt.haslayer(ICMPv6ND_NA))
+ self.assertTrue(pkt.haslayer(ICMPv6NDOptDstLLAddr))
+
+ self.assertEqual(pkt[Ether].dst, "33:33:00:00:00:01")
+
+ self.assertEqual(pkt[IPv6].dst, "ff02::1")
+ # convert addrs to packed format since string versions could differ
+ src_addr = inet_pton(socket.AF_INET6, pkt[IPv6].src)
+ vr_ll_addr = inet_pton(socket.AF_INET6, vr.interface().local_ip6_ll)
+ self.assertEqual(src_addr, vr_ll_addr)
+
+ self.assertTrue(pkt[ICMPv6ND_NA].tgt in vr.virtual_ips())
+ self.assertEqual(pkt[ICMPv6NDOptDstLLAddr].lladdr, vr.virtual_mac())
+
+ # VR with priority 255 owns the virtual address and should
+ # become master and start advertising immediately.
+ def test_vrrp6_master_adv(self):
+ """ IPv6 Master VR advertises """
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ prio = 255
+ intvl = self._default_adv
+ vr = VppVRRPVirtualRouter(self, self.pg0, 100,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags)
+ self._vrs.append(vr)
+
+ vr.add_vpp_config()
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+ vr.start_stop(is_start=1)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+ vr.start_stop(is_start=0)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+
+ pkts = self.pg0.get_capture(4, filter_out_fn=None)
+
+ # Init -> Master: Multicast group Join, VRRP adv, gratuitous NAs sent
+ self.verify_vrrp6_mlr(pkts[0], vr)
+ self.verify_vrrp6_adv(pkts[1], vr, prio=prio)
+ self.verify_vrrp6_gna(pkts[2], vr)
+ # Master -> Init: Adv with priority 0 sent to force an election
+ self.verify_vrrp6_adv(pkts[3], vr, prio=0)
+
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ # VR with priority < 255 enters backup state and does not advertise as
+ # long as it receives higher priority advertisements
+ def test_vrrp6_backup_noadv(self):
+ """ IPv6 Backup VR does not advertise """
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[self.pg0.remote_ip6])
+ vr.add_vpp_config()
+ self._vrs.append(vr)
+
+ vr.start_stop(is_start=1)
+
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ # watch for advertisements for 2x the master down preemption timeout
+ end_time = vr.start_time() + 2 * vr.master_down_seconds()
+
+ # Init -> Backup: A multicast listener report should be sent
+ pkts = self.pg0.get_capture(1, filter_out_fn=None)
+
+ # send higher prio advertisements, should not see VPP send any
+ src_ip = self.pg0.remote_ip6_ll
+ num_advs = 5
+ pkts = [self.vrrp_adv_packet(vr, prio=prio+10, src_ip=src_ip)]
+ self.logger.info(self.vapi.cli("show vlib graph"))
+ while time.time() < end_time:
+ self.send_and_assert_no_replies(self.pg0, pkts, timeout=intvl_s)
+ self.logger.info(self.vapi.cli("show trace"))
+ num_advs -= 1
+
+ vr.start_stop(is_start=0)
+ self.logger.info(self.vapi.cli("show vrrp vr"))
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ def test_vrrp6_master_nd(self):
+ """ IPv6 Master VR replies to NDP """
+ self.pg_start()
+
+ # VR virtual IP is the default, which is the pg local IP
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ vr = VppVRRPVirtualRouter(self, self.pg0, 100,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags)
+ vr.add_vpp_config()
+ self._vrs.append(vr)
+
+ # before the VR is up, NDP should resolve to interface MAC
+ self.pg0.resolve_ndp()
+ self.assertNotEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ # start the VR, NDP should now resolve to virtual MAC
+ vr.start_stop(is_start=1)
+ self.pg0.resolve_ndp()
+ self.assertEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ # stop the VR, ARP should resolve to interface MAC again
+ vr.start_stop(is_start=0)
+ self.pg0.resolve_ndp()
+ self.assertNotEqual(self.pg0.local_mac, vr.virtual_mac())
+
+ vr.remove_vpp_config()
+ self._vrs = []
+
+ def test_vrrp6_backup_nond(self):
+ """ IPv6 Backup VR ignores NDP """
+ # We need an address for a virtual IP that is not the IP that
+ # ARP requests will originate from
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.remote_hosts[1].ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ vr.add_vpp_config()
+ self._vrs.append(vr)
+
+ nsma = in6_getnsma(inet_pton(socket.AF_INET6, vip))
+ dmac = in6_getnsmac(nsma)
+ dst_ip = inet_ntop(socket.AF_INET6, nsma)
+
+ ndp_req = (Ether(dst=dmac, src=self.pg0.remote_mac) /
+ IPv6(dst=dst_ip, src=self.pg0.remote_ip6) /
+ ICMPv6ND_NS(tgt=vip) /
+ ICMPv6NDOptSrcLLAddr(lladdr=self.pg0.remote_mac))
+
+ # Before the VR is started make sure no reply to request for VIP
+ self.send_and_assert_no_replies(self.pg0, [ndp_req], timeout=1)
+
+ # VR should start in backup state and still should not reply to NDP
+ # send a higher priority adv to make sure it does not become master
+ adv = self.vrrp_adv_packet(vr, prio=prio+10, src_ip=self.pg0.remote_ip6)
+ pkts = [adv, ndp_req]
+ vr.start_stop(is_start=1)
+ self.send_and_assert_no_replies(self.pg0, pkts, timeout=intvl_s)
+
+ vr.start_stop(is_start=0)
+
+ def test_vrrp6_election(self):
+ """ IPv6 Backup VR becomes master if no advertisements received """
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.remote_ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ self.pg_start()
+ vr.start_stop(is_start=1)
+
+ # VR should be in backup state after starting
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ end_time = vr.start_time() + vr.master_down_seconds()
+
+ # no advertisements should arrive until timer expires
+ self.pg0.enable_capture()
+ while (time.time() + intvl_s) < end_time:
+ time.sleep(intvl_s)
+ self.pg0.assert_nothing_captured(filter_out_fn=is_not_adv)
+
+ # VR should be in master state after timer expires
+ self.pg0.enable_capture()
+ self.pg0.wait_for_packet(intvl_s, is_not_adv)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ def test_vrrp6_backup_preempts(self):
+ """ IPv6 Backup VR preempts lower priority master """
+
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.remote_ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ self.pg_start()
+ vr.start_stop(is_start=1)
+
+ # VR should be in backup state after starting
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+ end_time = vr.start_time() + vr.master_down_seconds()
+
+ # send lower prio advertisements until timer expires
+ src_ip = self.pg0.remote_ip6
+ pkts = [self.vrrp_adv_packet(prio=prio-10, src_ip=src_ip)]
+ while (time.time() + intvl_s) < end_time:
+ self.send_and_assert_no_replies(self.pg0, pkts, timeout=intvl_s)
+ self.logger.info(self.vapi.cli("show trace"))
+
+ # when timer expires, VR should take over as master
+ self.pg0.enable_capture()
+ self.pg0.wait_for_packet(timeout=intvl_s, filter_out_fn=is_not_adv)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ def test_vrrp6_master_preempted(self):
+ """ IPv6 Master VR preempted by higher priority backup """
+
+ # A prio 255 VR cannot be preempted so the prio has to be lower and
+ # we have to wait for it to take over
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ # Build advertisement packet and send it
+ pkts = [self.vrrp_adv_packet(prio=255, src_ip=self.pg0.remote_ip6)]
+ self.pg_send(self.pg0, pkts)
+
+ # VR should be in backup state again
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ def test_vrrp6_accept_mode_disabled(self):
+ """ IPv6 Master VR does not reply for VIP w/ accept mode off """
+
+ # accept mode only matters when prio < 255, so it will have to
+ # come up as a backup and take over as master after the timeout
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_hosts[4].ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ # send an ICMPv6 echo to the VR virtual IP address
+ echo = (Ether(dst=vr.virtual_mac(), src=self.pg0.remote_mac) /
+ IPv6(dst=vip, src=self.pg0.remote_ip6) /
+ ICMPv6EchoRequest(seq=1, id=self.pg0.sw_if_index))
+ self.pg_send(self.pg0, [echo])
+
+ # wait for an echo reply. none should be received
+ time.sleep(1)
+ self.pg0.assert_nothing_captured(filter_out_fn=is_not_echo_reply)
+
+ def test_vrrp6_accept_mode_enabled(self):
+ """ IPv6 Master VR replies for VIP w/ accept mode on """
+
+        # accept mode only matters when prio < 255, so the VR has to come
+        # up as a backup and take over as master after the timeout
+ vr_id = 100
+ prio = 100
+ intvl = self._default_adv
+ vip = self.pg0.remote_hosts[4].ip6
+ flags = (self._default_flags | VRRP_VR_FLAG_ACCEPT)
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # start VR
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_BACKUP)
+
+ # wait for VR to take over as master
+ end_time = vr.start_time() + vr.master_down_seconds()
+ sleep_s = end_time - time.time()
+ time.sleep(sleep_s)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+        # send an ICMPv6 echo to the VR virtual IP address
+ echo = (Ether(dst=vr.virtual_mac(), src=self.pg0.remote_mac) /
+ IPv6(dst=vip, src=self.pg0.remote_ip6) /
+ ICMPv6EchoRequest(seq=1, id=self.pg0.sw_if_index))
+ self.pg_send(self.pg0, [echo])
+
+ # wait for an echo reply.
+ time.sleep(1)
+ rx_pkts = self.pg0.get_capture(expected_count=1, timeout=1,
+ filter_out_fn=is_not_echo_reply)
+
+ self.assertEqual(rx_pkts[0][IPv6].src, vip)
+ self.assertEqual(rx_pkts[0][IPv6].dst, self.pg0.remote_ip6)
+ self.assertEqual(rx_pkts[0][ICMPv6EchoReply].seq, 1)
+ self.assertEqual(rx_pkts[0][ICMPv6EchoReply].id, self.pg0.sw_if_index)
+
+ def test_vrrp6_intf_tracking(self):
+ """ IPv6 Master VR adjusts priority based on tracked interface """
+
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.local_ip6
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=self._default_flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # add pg1 as a tracked interface and start the VR
+ adjustment = 50
+ adjusted_prio = prio - adjustment
+ vr.add_del_tracked_interface(is_add=1,
+ sw_if_index=self.pg1.sw_if_index,
+ prio=adjustment)
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ adv_configured = self.vrrp_adv_packet(prio=prio)
+ adv_adjusted = self.vrrp_adv_packet(prio=adjusted_prio)
+
+ # tracked intf is up -> advertised priority == configured priority
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ # take down pg1, verify priority is now being adjusted
+ self.pg1.admin_down()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_adjusted)
+
+ # bring up pg1, verify priority now matches configured value
+ self.pg1.admin_up()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ # remove IP address from pg1, verify priority now being adjusted
+ self.pg1.unconfig_ip6()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_adjusted)
+
+ # add IP address to pg1, verify priority now matches configured value
+ self.pg1.config_ip6()
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+ self.assertEqual(rx, adv_configured)
+
+ def test_vrrp6_master_adv_unicast(self):
+ """ IPv6 Master VR advertises (unicast) """
+
+ vr_id = 100
+ prio = 255
+ intvl = self._default_adv
+ intvl_s = intvl * 0.01
+ vip = self.pg0.local_ip6
+ flags = (self._default_flags | VRRP_VR_FLAG_UNICAST)
+ unicast_peer = self.pg0.remote_hosts[4]
+ vr = VppVRRPVirtualRouter(self, self.pg0, vr_id,
+ prio=prio, intvl=intvl,
+ flags=flags,
+ vips=[vip])
+ self._vrs.append(vr)
+ vr.add_vpp_config()
+ vr.set_unicast_peers([unicast_peer.ip6])
+
+ # After adding the VR, it should be in the init state
+ vr.assert_state_equals(VRRP_VR_STATE_INIT)
+
+ # Start VR, transition to master
+ vr.start_stop(is_start=1)
+ vr.assert_state_equals(VRRP_VR_STATE_MASTER)
+
+ self.pg0.enable_capture()
+ rx = self.pg0.wait_for_packet(timeout=intvl_s,
+ filter_out_fn=is_not_adv)
+
+ self.assertTrue(rx.haslayer(Ether))
+ self.assertTrue(rx.haslayer(IPv6))
+ self.assertTrue(rx.haslayer(VRRPv3))
+ self.assertEqual(rx[Ether].src, self.pg0.local_mac)
+ self.assertEqual(rx[Ether].dst, unicast_peer.mac)
+ self.assertEqual(ip6_normalize(rx[IPv6].src),
+ ip6_normalize(self.pg0.local_ip6_ll))
+ self.assertEqual(ip6_normalize(rx[IPv6].dst),
+ ip6_normalize(unicast_peer.ip6))
+ self.assertEqual(rx[VRRPv3].vrid, vr_id)
+ self.assertEqual(rx[VRRPv3].priority, prio)
+ self.assertEqual(rx[VRRPv3].ipcount, 1)
+ self.assertEqual(rx[VRRPv3].addrlist, [vip])
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vxlan.py b/test/test_vxlan.py
new file mode 100644
index 00000000000..028275ccedf
--- /dev/null
+++ b/test/test_vxlan.py
@@ -0,0 +1,421 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range, reassemble4
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw, bind_layers
+from scapy.layers.inet import IP, UDP
+from scapy.layers.vxlan import VXLAN
+
+import util
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_vxlan_tunnel import VppVxlanTunnel
+from vpp_ip import INVALID_INDEX
+
+
+class TestVxlan(BridgeDomain, VppTestCase):
+ """ VXLAN Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac) /
+ IP(src=src_ip, dst=self.mcast_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing VXLAN header
+ """
+        # check whether the I flag is set
+ self.assertEqual(pkt[VXLAN].flags, int('0x8', 16))
+ return pkt[VXLAN].payload
+
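+    # The outer header handled above is the RFC 7348 VXLAN encapsulation;
+    # a flags value of 0x08 means the I bit ("VNI field is valid") is set.
+    # A minimal sketch of such a frame built with scapy -- the addresses
+    # below are placeholders, not values used by this test:
+    @staticmethod
+    def example_vxlan_frame(inner, vni=0x12345):
+        return (Ether(src='02:00:00:00:00:01', dst='02:00:00:00:00:02') /
+                IP(src='192.0.2.1', dst='192.0.2.2') /
+                UDP(sport=4789, dport=4789) /
+                VXLAN(flags=0x08, vni=vni) /
+                inner)
+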
+ # Method for checking VXLAN encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+        # Verify UDP destination port is the VXLAN port; the source UDP
+        # port could be arbitrary.
+ self.assertEqual(pkt[UDP].dport, self.dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
+ # Verify VNI
+ self.assertEqual(pkt[VXLAN].vni, vni)
+
+ @classmethod
+ def create_vxlan_flood_test_bd(cls, vni, n_ucast_tunnels, port):
+        # Create n_ucast_tunnels unicast VXLAN tunnels under the BD
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
+ # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+
+ r = VppVxlanTunnel(cls, src=cls.pg0.local_ip4,
+ src_port=port, dst_port=port,
+ dst=dest_ip4, vni=vni)
+ r.add_vpp_config()
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
+
+ @classmethod
+ def add_del_shared_mcast_dst_load(cls, port, is_add):
+ """
+ add or del tunnels sharing the same mcast dst
+ to test vxlan ref_count mechanism
+ """
+ n_shared_dst_tunnels = 20
+ vni_start = 10000
+ vni_end = vni_start + n_shared_dst_tunnels
+ for vni in range(vni_start, vni_end):
+ r = VppVxlanTunnel(cls, src=cls.pg0.local_ip4,
+ src_port=port, dst_port=port,
+ dst=cls.mcast_ip4, mcast_sw_if_index=1, vni=vni)
+ if is_add:
+ r.add_vpp_config()
+ if r.sw_if_index == 0xffffffff:
+ raise ValueError("bad sw_if_index: ~0")
+ else:
+ r.remove_vpp_config()
+
+ @classmethod
+ def add_shared_mcast_dst_load(cls, port):
+ cls.add_del_shared_mcast_dst_load(port=port, is_add=1)
+
+ @classmethod
+ def del_shared_mcast_dst_load(cls, port):
+ cls.add_del_shared_mcast_dst_load(port=port, is_add=0)
+
+ @classmethod
+ def add_del_mcast_tunnels_load(cls, port, is_add):
+ """
+ add or del tunnels to test vxlan stability
+ """
+ n_distinct_dst_tunnels = 200
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_distinct_dst_tunnels
+ for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
+ ip_range_end):
+ vni = bytearray(socket.inet_pton(socket.AF_INET, dest_ip4))[3]
+ r = VppVxlanTunnel(cls, src=cls.pg0.local_ip4,
+ src_port=port, dst_port=port,
+ dst=dest_ip4, mcast_sw_if_index=1, vni=vni)
+ if is_add:
+ r.add_vpp_config()
+ else:
+ r.remove_vpp_config()
+
+ @classmethod
+ def add_mcast_tunnels_load(cls, port):
+ cls.add_del_mcast_tunnels_load(port=port, is_add=1)
+
+ @classmethod
+ def del_mcast_tunnels_load(cls, port):
+ cls.add_del_mcast_tunnels_load(port=port, is_add=0)
+
+ # Class method to start the VXLAN test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlan, cls).setUpClass()
+
+ try:
+ cls.flags = 0x8
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Our Multicast address
+ cls.mcast_ip4 = '239.1.1.1'
+ cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip4)
+ except Exception:
+ cls.tearDownClass()
+ raise
+
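+    # For reference, util.mcast_ip_to_mac() is assumed to perform the
+    # standard RFC 1112 mapping: the low 23 bits of the IPv4 group address
+    # are copied into the 01:00:5e:00:00:00 prefix, so 239.1.1.1 maps to
+    # 01:00:5e:01:01:01.  A minimal sketch of that mapping:
+    @staticmethod
+    def example_mcast_ip4_to_mac(ip4):
+        b = socket.inet_pton(socket.AF_INET, ip4)
+        return '01:00:5e:%02x:%02x:%02x' % (b[1] & 0x7f, b[2], b[3])
+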
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVxlan, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestVxlan, self).setUp()
+
+ def createVxLANInterfaces(self, port=4789):
+ # Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1
+ # into BD.
+ self.dport = port
+
+ self.single_tunnel_vni = 0x12345
+ self.single_tunnel_bd = 1
+ r = VppVxlanTunnel(self, src=self.pg0.local_ip4,
+ dst=self.pg0.remote_ip4,
+ src_port=self.dport, dst_port=self.dport,
+ vni=self.single_tunnel_vni)
+ r.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=self.single_tunnel_bd)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=self.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ self.n_ucast_tunnels = 10
+ self.mcast_flood_bd = 2
+ self.create_vxlan_flood_test_bd(self.mcast_flood_bd,
+ self.n_ucast_tunnels,
+ self.dport)
+ r = VppVxlanTunnel(self, src=self.pg0.local_ip4, dst=self.mcast_ip4,
+ src_port=self.dport, dst_port=self.dport,
+ mcast_sw_if_index=1, vni=self.mcast_flood_bd)
+ r.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=self.mcast_flood_bd)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg2.sw_if_index, bd_id=self.mcast_flood_bd)
+
+ # Add and delete mcast tunnels to check stability
+ self.add_shared_mcast_dst_load(self.dport)
+ self.add_mcast_tunnels_load(self.dport)
+ self.del_shared_mcast_dst_load(self.dport)
+ self.del_mcast_tunnels_load(self.dport)
+
+ # Setup vni 3 to test unicast flooding
+ self.ucast_flood_bd = 3
+ self.create_vxlan_flood_test_bd(self.ucast_flood_bd,
+ self.n_ucast_tunnels,
+ self.dport)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg3.sw_if_index, bd_id=self.ucast_flood_bd)
+
+        # Teach scapy to dissect VXLAN on the configured UDP port
+ bind_layers(UDP, VXLAN, dport=self.dport)
+
+ def encap_big_packet(self):
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [1500, 0, 0, 0])
+
+ frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1450))
+
+ self.pg1.add_stream([frame])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+        # Reassemble the received fragments, reattach the outer Ethernet
+        # header and verify the VXLAN encapsulation.
+ out = self.pg0.get_capture(2)
+ ether = out[0]
+ pkt = reassemble4(out)
+ pkt = ether / pkt
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ payload = self.decapsulate(pkt)
+ # TODO: Scapy bug?
+ # self.assert_eq_pkts(payload, frame)
+
+ """
+ Tests with default port (4789)
+ """
+ def test_decap(self):
+ """ Decapsulation test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan, self).test_decap()
+
+ def test_encap(self):
+ """ Encapsulation test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan, self).test_encap()
+
+ def test_encap_big_packet(self):
+ """ Encapsulation test send big frame from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.createVxLANInterfaces()
+ self.encap_big_packet()
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan, self).test_ucast_flood()
+
+ def test_mcast_flood(self):
+ """ Multicast flood test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan, self).test_mcast_flood()
+
+ def test_mcast_rcv(self):
+ """ Multicast receive test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan, self).test_mcast_rcv()
+
+ """
+ Tests with custom port
+ """
+ def test_decap_custom_port(self):
+ """ Decapsulation test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan, self).test_decap()
+
+ def test_encap_custom_port(self):
+ """ Encapsulation test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan, self).test_encap()
+
+ def test_ucast_flood_custom_port(self):
+ """ Unicast flood test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan, self).test_ucast_flood()
+
+ def test_mcast_flood_custom_port(self):
+ """ Multicast flood test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan, self).test_mcast_flood()
+
+ def test_mcast_rcv_custom_port(self):
+ """ Multicast receive test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan, self).test_mcast_rcv()
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+
+ def tearDown(self):
+ super(TestVxlan, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show vxlan tunnel"))
+
+
+class TestVxlan2(VppTestCase):
+ """ VXLAN Test Case """
+ def setUp(self):
+ super(TestVxlan2, self).setUp()
+
+        # Create 4 pg interfaces.
+ self.create_pg_interfaces(range(4))
+ for pg in self.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ self.pg0.config_ip4()
+ self.pg0.resolve_arp()
+
+ def tearDown(self):
+ super(TestVxlan2, self).tearDown()
+
+ def test_xconnect(self):
+ """ VXLAN source address not local """
+
+ #
+ # test the broken configuration of a VXLAN tunnel whose
+        # source address is not local to the box. Packets sent
+ # through the tunnel should be dropped
+ #
+ t = VppVxlanTunnel(self,
+ src="10.0.0.5",
+ dst=self.pg0.local_ip4,
+ vni=1000)
+ t.add_vpp_config()
+ t.admin_up()
+
+ self.vapi.sw_interface_set_l2_xconnect(t.sw_if_index,
+ self.pg1.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index,
+ t.sw_if_index,
+ enable=1)
+
+ p = (Ether(src="00:11:22:33:44:55",
+ dst="00:00:00:11:22:33") /
+ IP(src="4.3.2.1", dst="1.2.3.4") /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1450))
+
+ rx = self.send_and_assert_no_replies(self.pg1, [p])
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vxlan6.py b/test/test_vxlan6.py
new file mode 100644
index 00000000000..123cce9b7ba
--- /dev/null
+++ b/test/test_vxlan6.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python3
+
+import socket
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw, bind_layers
+from scapy.layers.inet6 import IP, IPv6, UDP
+from scapy.layers.vxlan import VXLAN
+from scapy.compat import raw
+
+import util
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_vxlan_tunnel import VppVxlanTunnel
+from vpp_ip import INVALID_INDEX
+
+
+class TestVxlan6(BridgeDomain, VppTestCase):
+ """ VXLAN over IPv6 Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ @classmethod
+ def ip_range(cls, s, e):
+ """ range of remote ip's """
+ tmp = cls.pg0.remote_ip6.rsplit(':', 1)[0]
+ return ("%s:%x" % (tmp, i) for i in range(s, e))
+
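+    # Usage sketch: the last group of pg0's remote address is replaced by
+    # each value in the range; e.g. with a (hypothetical) remote address of
+    # "fd01:1::2", ip_range(10, 13) yields "fd01:1::a", "fd01:1::b" and
+    # "fd01:1::c".
+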
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac) /
+ IPv6(src=src_ip, dst=self.mcast_ip6) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing VXLAN header
+ """
+        # check whether the I flag is set
+ self.assertEqual(pkt[VXLAN].flags, int('0x8', 16))
+ return pkt[VXLAN].payload
+
+ # Method for checking VXLAN encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP.
+ self.assertEqual(pkt[IPv6].src, self.pg0.local_ip6)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IPv6].dst, self.pg0.remote_ip6)
+ else:
+ self.assertEqual(pkt[IPv6].dst, type(self).mcast_ip6)
+        # Verify UDP destination port is the VXLAN port; the source UDP
+        # port could be arbitrary.
+ self.assertEqual(pkt[UDP].dport, self.dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt, ignore_zero_checksum=False)
+ # Verify VNI
+ self.assertEqual(pkt[VXLAN].vni, vni)
+
+ @classmethod
+ def create_vxlan_flood_test_bd(cls, vni, n_ucast_tunnels, port):
+        # Create n_ucast_tunnels unicast VXLAN tunnels under the BD
+ start = 10
+ end = start + n_ucast_tunnels
+ for dest_ip6 in cls.ip_range(start, end):
+ # add host route so dest ip will not be resolved
+ rip = VppIpRoute(cls, dest_ip6, 128,
+ [VppRoutePath(cls.pg0.remote_ip6, INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ r = VppVxlanTunnel(cls, src=cls.pg0.local_ip6,
+ src_port=port, dst_port=port,
+ dst=dest_ip6, vni=vni)
+ r.add_vpp_config()
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
+
+ @classmethod
+ def add_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=1)
+
+ @classmethod
+ def del_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=0)
+
+ # Class method to start the VXLAN test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlan6, cls).setUpClass()
+
+ try:
+ cls.flags = 0x8
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv6 addresses on VPP pg0.
+ cls.pg0.config_ip6()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_ndp()
+
+ # Our Multicast address
+ cls.mcast_ip6 = 'ff0e::1'
+ cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip6)
+ except Exception:
+ super(TestVxlan6, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVxlan6, cls).tearDownClass()
+
+ def setUp(self):
+ super(TestVxlan6, self).setUp()
+
+ def createVxLANInterfaces(self, port=4789):
+ # Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1
+ # into BD.
+ self.dport = port
+
+ self.single_tunnel_vni = 0x12345
+ self.single_tunnel_bd = 1
+ r = VppVxlanTunnel(self, src=self.pg0.local_ip6,
+ dst=self.pg0.remote_ip6,
+ src_port=self.dport, dst_port=self.dport,
+ vni=self.single_tunnel_vni)
+ r.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=self.single_tunnel_bd)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg1.sw_if_index, bd_id=self.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ self.n_ucast_tunnels = 10
+ self.mcast_flood_bd = 2
+ self.create_vxlan_flood_test_bd(self.mcast_flood_bd,
+ self.n_ucast_tunnels,
+ self.dport)
+ r = VppVxlanTunnel(self, src=self.pg0.local_ip6, dst=self.mcast_ip6,
+ src_port=self.dport, dst_port=self.dport,
+ mcast_sw_if_index=1, vni=self.mcast_flood_bd)
+ r.add_vpp_config()
+ self.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=self.mcast_flood_bd)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg2.sw_if_index, bd_id=self.mcast_flood_bd)
+
+ # Setup vni 3 to test unicast flooding
+ self.ucast_flood_bd = 3
+ self.create_vxlan_flood_test_bd(self.ucast_flood_bd,
+ self.n_ucast_tunnels,
+ self.dport)
+ self.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=self.pg3.sw_if_index, bd_id=self.ucast_flood_bd)
+
+        # Teach scapy to dissect VXLAN on the configured UDP port
+ bind_layers(UDP, VXLAN, dport=self.dport)
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+ def tearDown(self):
+ super(TestVxlan6, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show vxlan tunnel"))
+
+ def encap_fragmented_packet(self):
+ frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1000))
+
+ frags = util.fragment_rfc791(frame, 400)
+
+ self.pg1.add_stream(frags)
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ out = self.pg0.get_capture(3)
+
+ payload = []
+ for pkt in out:
+ payload.append(self.decapsulate(pkt))
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ reassembled = util.reassemble4(payload)
+
+ self.assertEqual(Ether(raw(frame))[IP], reassembled[IP])
+
+ """
+ Tests with default port (4789)
+ """
+ def test_decap(self):
+ """ Decapsulation test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan6, self).test_decap()
+
+ def test_encap(self):
+ """ Encapsulation test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan6, self).test_encap()
+
+ def test_encap_fragmented_packet(self):
+ """ Encapsulation test send fragments from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.createVxLANInterfaces()
+ self.encap_fragmented_packet()
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan6, self).test_ucast_flood()
+
+ def test_mcast_flood(self):
+ """ Multicast flood test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan6, self).test_mcast_flood()
+
+ def test_mcast_rcv(self):
+ """ Multicast receive test
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces()
+ super(TestVxlan6, self).test_mcast_rcv()
+
+ """
+ Tests with custom port
+ """
+ def test_decap_custom_port(self):
+ """ Decapsulation test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan6, self).test_decap()
+
+ def test_encap_custom_port(self):
+ """ Encapsulation test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan6, self).test_encap()
+
+ def test_ucast_flood_custom_port(self):
+ """ Unicast flood test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan6, self).test_ucast_flood()
+
+ def test_mcast_flood_custom_port(self):
+ """ Multicast flood test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan6, self).test_mcast_flood()
+
+ def test_mcast_rcv_custom_port(self):
+ """ Multicast receive test custom port
+        from BridgeDomain
+ """
+ self.createVxLANInterfaces(1111)
+ super(TestVxlan6, self).test_mcast_rcv()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vxlan_gbp.py b/test/test_vxlan_gbp.py
new file mode 100644
index 00000000000..f332aced7d8
--- /dev/null
+++ b/test/test_vxlan_gbp.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range, reassemble4_ether
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP
+from scapy.layers.vxlan import VXLAN
+
+from vpp_ip_route import VppIpRoute, VppRoutePath
+from vpp_ip import INVALID_INDEX
+
+
+class TestVxlanGbp(VppTestCase):
+ """ VXLAN GBP Test Case """
+
+ @property
+ def frame_request(self):
+ """ Ethernet frame modeling a generic request """
+ return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
+ IP(src='1.2.3.4', dst='4.3.2.1') /
+ UDP(sport=10000, dport=20000) /
+ Raw(b'\xa5' * 100))
+
+ @property
+ def frame_reply(self):
+ """ Ethernet frame modeling a generic reply """
+ return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 100))
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN GBP header with
+ its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags, gpflags=self.gpflags,
+ gpid=self.sclass) / pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing VXLAN header
+ """
+        # check whether the G and I flags are set
+ self.assertEqual(pkt[VXLAN].flags, int('0x88', 16))
+ return pkt[VXLAN].payload
+
+ # Method for checking VXLAN GBP encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify VXLAN GBP tunnel source IP is VPP_IP and destination IP is
+ # MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+ # Verify UDP destination port is VXLAN GBP 48879, source UDP port could
+ # be arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
+ # Verify VNI
+ # pkt.show()
+ self.assertEqual(pkt[VXLAN].vni, vni)
+ # Verify Source Class
+ self.assertEqual(pkt[VXLAN].gpid, 0)
+
+ @classmethod
+ def create_vxlan_gbp_flood_test_bd(cls, vni, n_ucast_tunnels):
+        # Create n_ucast_tunnels unicast VXLAN tunnels under the BD
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(cls.pg0.remote_ip4,
+ ip_range_start,
+ ip_range_end):
+ # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+ r = cls.vapi.vxlan_gbp_tunnel_add_del(
+ tunnel={
+ 'src': cls.pg0.local_ip4,
+ 'dst': dest_ip4,
+ 'vni': vni,
+ 'instance': INVALID_INDEX,
+ 'mcast_sw_if_index': INVALID_INDEX,
+ 'mode': 1,
+ },
+ is_add=1
+ )
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=vni)
+
+ # Class method to start the VXLAN GBP test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlanGbp, cls).setUpClass()
+
+ try:
+ cls.dport = 48879
+ cls.flags = 0x88
+ cls.gpflags = 0x0
+ cls.sclass = 0
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Create VXLAN GBP VTEP on VPP pg0, and put vxlan_gbp_tunnel0 and
+ # pg1 into BD.
+ cls.single_tunnel_bd = 1
+ cls.single_tunnel_vni = 0xabcde
+ r = cls.vapi.vxlan_gbp_tunnel_add_del(
+ tunnel={
+ 'src': cls.pg0.local_ip4,
+ 'dst': cls.pg0.remote_ip4,
+ 'vni': cls.single_tunnel_vni,
+ 'instance': INVALID_INDEX,
+ 'mcast_sw_if_index': INVALID_INDEX,
+ 'mode': 1,
+ },
+ is_add=1
+ )
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ cls.n_ucast_tunnels = 2
+ # Setup vni 3 to test unicast flooding
+ cls.ucast_flood_bd = 3
+ cls.create_vxlan_gbp_flood_test_bd(cls.ucast_flood_bd,
+ cls.n_ucast_tunnels)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg3.sw_if_index,
+ bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestVxlanGbp, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVxlanGbp, cls).tearDownClass()
+
+ def assert_eq_pkts(self, pkt1, pkt2):
+ """ Verify the Ether, IP, UDP, payload are equal in both
+ packets
+ """
+ self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
+ self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
+ self.assertEqual(pkt1[IP].src, pkt2[IP].src)
+ self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
+ self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
+ self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
+ self.assertEqual(pkt1[Raw], pkt2[Raw])
+
+ def test_decap(self):
+ """ Decapsulation test
+ Send encapsulated frames from pg0
+ Verify receipt of decapsulated frames on pg1
+ """
+ encapsulated_pkt = self.encapsulate(self.frame_request,
+ self.single_tunnel_vni)
+
+ self.pg0.add_stream([encapsulated_pkt, ])
+
+ self.pg1.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's the non-encapsulated
+ # frame
+ out = self.pg1.get_capture(1)
+ pkt = out[0]
+ self.assert_eq_pkts(pkt, self.frame_request)
+
+ def test_encap(self):
+ """ Encapsulation test
+ Send frames from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg1.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture(1)
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+ Send frames from pg3
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg3.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+ # Get packet from each tunnel and assert it's correctly encapsulated.
+ out = self.pg0.get_capture(self.n_ucast_tunnels)
+ for pkt in out:
+ self.check_encapsulation(pkt, self.ucast_flood_bd, True)
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_encap_big_packet(self):
+ """ Encapsulation test send big frame from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+
+ self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [1500, 0, 0, 0])
+
+ frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1450))
+
+ self.pg1.add_stream([frame])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+        # Reassemble the received fragments and verify the VXLAN-GBP
+        # encapsulation.
+ out = self.pg0.get_capture(2)
+ pkt = reassemble4_ether(out)
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, frame)
+
+    # Method to define VPP actions before tear down of the test case.
+    # Overrides tearDown method in VppTestCase class.
+    # @param self The object pointer.
+ def tearDown(self):
+ super(TestVxlanGbp, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
+ self.logger.info(self.vapi.cli("show error"))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_vxlan_gpe.py b/test/test_vxlan_gpe.py
new file mode 100644
index 00000000000..c5d6bf07f7c
--- /dev/null
+++ b/test/test_vxlan_gpe.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python3
+
+import socket
+from util import ip4_range
+import unittest
+from framework import VppTestCase, VppTestRunner, running_extended_tests
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.packet import Raw
+from scapy.layers.inet import IP, UDP
+from scapy.layers.vxlan import VXLAN
+
+import util
+from vpp_ip_route import VppIpRoute, VppRoutePath
+
+from vpp_ip import INVALID_INDEX
+
+
+@unittest.skipUnless(running_extended_tests, "part of extended tests")
+class TestVxlanGpe(BridgeDomain, VppTestCase):
+ """ VXLAN-GPE Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN-GPE header
+ with its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def ip_range(self, start, end):
+ """ range of remote ip's """
+ return ip4_range(self.pg0.remote_ip4, start, end)
+
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN-GPE header
+ with its UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac) /
+ IP(src=src_ip, dst=self.mcast_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing VXLAN-GPE header
+ """
+        # check whether the I and P flags are set
+ self.assertEqual(pkt[VXLAN].flags, 0x0c)
+ return pkt[VXLAN].payload
+
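+    # For reference, 0x0c here is the VXLAN-GPE I bit (0x08, "VNI valid")
+    # plus the P bit (0x04, "Next Protocol field present"); the plain scapy
+    # VXLAN layer is reused since the base header layout is the same.  A
+    # minimal sketch of decomposing the flags checked above:
+    @staticmethod
+    def example_gpe_flag_bits(flags):
+        # returns (I, P) as booleans
+        return bool(flags & 0x08), bool(flags & 0x04)
+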
+ # Method for checking VXLAN-GPE encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ else:
+ self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
+ # Verify VXLAN-GPE tunnel src IP is VPP_IP and dst IP is MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ if not mcast_pkt:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ else:
+ self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
+ # Verify UDP destination port is VXLAN-GPE 4790, source UDP port
+ # could be arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
+ # Verify VNI
+ self.assertEqual(pkt[VXLAN].vni, vni)
+
+ @classmethod
+ def create_vxlan_gpe_flood_test_bd(cls, vni, n_ucast_tunnels):
+        # Create n_ucast_tunnels unicast VXLAN tunnels under the BD
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_ucast_tunnels
+ next_hop_address = cls.pg0.remote_ip4
+ for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
+            # add host route so dest_ip4 will not be resolved
+ rip = VppIpRoute(cls, dest_ip4, 32,
+ [VppRoutePath(next_hop_address,
+ INVALID_INDEX)],
+ register=False)
+ rip.add_vpp_config()
+
+ r = cls.vapi.vxlan_gpe_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4,
+ dst_addr=dest_ip4,
+ vni=vni)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=vni)
+
+ @classmethod
+ def add_del_shared_mcast_dst_load(cls, is_add):
+ """
+ add or del tunnels sharing the same mcast dst
+ to test vxlan_gpe ref_count mechanism
+ """
+ n_shared_dst_tunnels = 20
+ vni_start = 1000
+ vni_end = vni_start + n_shared_dst_tunnels
+ for vni in range(vni_start, vni_end):
+ r = cls.vapi.vxlan_gpe_add_del_tunnel(
+ local=cls.pg0.local_ip4,
+ remote=cls.mcast_ip4,
+ mcast_sw_if_index=1,
+ vni=vni,
+ is_add=is_add)
+ if r.sw_if_index == 0xffffffff:
+ raise ValueError("bad sw_if_index: ~0")
+
+ @classmethod
+ def add_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=1)
+
+ @classmethod
+ def del_shared_mcast_dst_load(cls):
+ cls.add_del_shared_mcast_dst_load(is_add=0)
+
+ @classmethod
+ def add_del_mcast_tunnels_load(cls, is_add):
+ """
+ add or del tunnels to test vxlan_gpe stability
+ """
+ n_distinct_dst_tunnels = 20
+ ip_range_start = 10
+ ip_range_end = ip_range_start + n_distinct_dst_tunnels
+ for dest_ip4 in ip4_range(cls.mcast_ip4, ip_range_start,
+ ip_range_end):
+ vni = int(dest_ip4.split(".")[3])
+ cls.vapi.vxlan_gpe_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4,
+ dst_addr=dest_ip4,
+ mcast_sw_if_index=1,
+ vni=vni,
+ is_add=is_add)
+
+ @classmethod
+ def add_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=1)
+
+ @classmethod
+ def del_mcast_tunnels_load(cls):
+ cls.add_del_mcast_tunnels_load(is_add=0)
+
+ # Class method to start the VXLAN-GPE test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlanGpe, cls).setUpClass()
+
+ try:
+ cls.dport = 4790
+ cls.flags = 0x0c
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Our Multicast address
+ cls.mcast_ip4 = '239.1.1.1'
+ cls.mcast_mac = util.mcast_ip_to_mac(cls.mcast_ip4)
+
+ # Create VXLAN-GPE VTEP on VPP pg0, and put vxlan_gpe_tunnel0
+ # and pg1 into BD.
+ cls.single_tunnel_vni = 0xabcde
+ cls.single_tunnel_bd = 11
+ r = cls.vapi.vxlan_gpe_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4,
+ dst_addr=cls.pg0.remote_ip4,
+ vni=cls.single_tunnel_vni)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ cls.n_ucast_tunnels = 10
+ cls.mcast_flood_bd = 12
+ cls.create_vxlan_gpe_flood_test_bd(cls.mcast_flood_bd,
+ cls.n_ucast_tunnels)
+ r = cls.vapi.vxlan_gpe_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4,
+ dst_addr=cls.mcast_ip4,
+ mcast_sw_if_index=1,
+ vni=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
+ bd_id=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
+
+ # Add and delete mcast tunnels to check stability
+ cls.add_shared_mcast_dst_load()
+ cls.add_mcast_tunnels_load()
+ cls.del_shared_mcast_dst_load()
+ cls.del_mcast_tunnels_load()
+
+ # Setup vni 3 to test unicast flooding
+ cls.ucast_flood_bd = 13
+ cls.create_vxlan_gpe_flood_test_bd(cls.ucast_flood_bd,
+ cls.n_ucast_tunnels)
+ cls.vapi.sw_interface_set_l2_bridge(
+ rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestVxlanGpe, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestVxlanGpe, cls).tearDownClass()
+
+ @unittest.skip("test disabled for vxlan-gpe")
+ def test_mcast_flood(self):
+ """ inherited from BridgeDomain """
+ pass
+
+ @unittest.skip("test disabled for vxlan-gpe")
+ def test_mcast_rcv(self):
+ """ inherited from BridgeDomain """
+ pass
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+ def tearDown(self):
+ super(TestVxlanGpe, self).tearDown()
+
+ def show_commands_at_teardown(self):
+ self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
+ self.logger.info(self.vapi.cli("show int"))
+ self.logger.info(self.vapi.cli("show vxlan-gpe"))
+ self.logger.info(self.vapi.cli("show trace"))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_wireguard.py b/test/test_wireguard.py
new file mode 100755
index 00000000000..edc305b1336
--- /dev/null
+++ b/test/test_wireguard.py
@@ -0,0 +1,748 @@
+#!/usr/bin/env python3
+""" Wg tests """
+
+import datetime
+import base64
+import struct
+
+from hashlib import blake2s
+from scapy.packet import Packet
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet import IP, UDP
+from scapy.contrib.wireguard import Wireguard, WireguardResponse, \
+ WireguardInitiation, WireguardTransport
+from cryptography.hazmat.primitives.asymmetric.x25519 import \
+ X25519PrivateKey, X25519PublicKey
+from cryptography.hazmat.primitives.serialization import Encoding, \
+ PrivateFormat, PublicFormat, NoEncryption
+from cryptography.hazmat.primitives.hashes import BLAKE2s, Hash
+from cryptography.hazmat.primitives.hmac import HMAC
+from cryptography.hazmat.backends import default_backend
+from noise.connection import NoiseConnection, Keypair
+
+from vpp_ipip_tun_interface import VppIpIpTunInterface
+from vpp_interface import VppInterface
+from vpp_object import VppObject
+from framework import VppTestCase
+from re import compile
+import unittest
+
+""" TestWg is a subclass of VPPTestCase classes.
+
+Wg test.
+
+"""
+
+
+def private_key_bytes(k):
+ return k.private_bytes(Encoding.Raw,
+ PrivateFormat.Raw,
+ NoEncryption())
+
+
+def public_key_bytes(k):
+ return k.public_bytes(Encoding.Raw,
+ PublicFormat.Raw)
+
+
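+# WireGuard keys are raw 32-byte X25519 values; the familiar base64 strings
+# used by wg(8) are simply base64 of those raw bytes.  A minimal sketch of
+# that conversion (illustrative only, the tests below do not call it):
+def example_wg_key_pair_base64():
+    key = X25519PrivateKey.generate()
+    priv = base64.b64encode(private_key_bytes(key)).decode('ascii')
+    pub = base64.b64encode(public_key_bytes(key.public_key())).decode('ascii')
+    return priv, pub
+
+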
+class VppWgInterface(VppInterface):
+ """
+ VPP WireGuard interface
+ """
+
+ def __init__(self, test, src, port):
+ super(VppWgInterface, self).__init__(test)
+
+ self.port = port
+ self.src = src
+ self.private_key = X25519PrivateKey.generate()
+ self.public_key = self.private_key.public_key()
+
+ def public_key_bytes(self):
+ return public_key_bytes(self.public_key)
+
+ def private_key_bytes(self):
+ return private_key_bytes(self.private_key)
+
+ def add_vpp_config(self):
+ r = self.test.vapi.wireguard_interface_create(interface={
+ 'user_instance': 0xffffffff,
+ 'port': self.port,
+ 'src_ip': self.src,
+ 'private_key': private_key_bytes(self.private_key),
+ 'generate_key': False
+ })
+ self.set_sw_if_index(r.sw_if_index)
+ self.test.registry.register(self, self.test.logger)
+ return self
+
+ def remove_vpp_config(self):
+ self.test.vapi.wireguard_interface_delete(
+ sw_if_index=self._sw_if_index)
+
+ def query_vpp_config(self):
+ ts = self.test.vapi.wireguard_interface_dump(sw_if_index=0xffffffff)
+ for t in ts:
+ if t.interface.sw_if_index == self._sw_if_index and \
+ str(t.interface.src_ip) == self.src and \
+ t.interface.port == self.port and \
+ t.interface.private_key == private_key_bytes(self.private_key):
+ return True
+ return False
+
+ def __str__(self):
+ return self.object_id()
+
+ def object_id(self):
+ return "wireguard-%d" % self._sw_if_index
+
+
+def find_route(test, prefix, table_id=0):
+ routes = test.vapi.ip_route_dump(table_id, False)
+
+ for e in routes:
+ if table_id == e.route.table_id \
+ and str(e.route.prefix) == str(prefix):
+ return True
+ return False
+
+
+NOISE_HANDSHAKE_NAME = b"Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"
+NOISE_IDENTIFIER_NAME = b"WireGuard v1 zx2c4 Jason@zx2c4.com"
+
+
+class VppWgPeer(VppObject):
+
+ def __init__(self,
+ test,
+ itf,
+ endpoint,
+ port,
+ allowed_ips,
+ persistent_keepalive=15):
+ self._test = test
+ self.itf = itf
+ self.endpoint = endpoint
+ self.port = port
+ self.allowed_ips = allowed_ips
+ self.persistent_keepalive = persistent_keepalive
+
+        # the remote peer's key pair, generated locally for the test
+ self.private_key = X25519PrivateKey.generate()
+ self.public_key = self.private_key.public_key()
+
+ self.noise = NoiseConnection.from_name(NOISE_HANDSHAKE_NAME)
+
+ def validate_routing(self):
+ for a in self.allowed_ips:
+ self._test.assertTrue(find_route(self._test, a))
+
+ def validate_no_routing(self):
+ for a in self.allowed_ips:
+ self._test.assertFalse(find_route(self._test, a))
+
+ def add_vpp_config(self):
+ rv = self._test.vapi.wireguard_peer_add(
+ peer={
+ 'public_key': self.public_key_bytes(),
+ 'port': self.port,
+ 'endpoint': self.endpoint,
+ 'n_allowed_ips': len(self.allowed_ips),
+ 'allowed_ips': self.allowed_ips,
+ 'sw_if_index': self.itf.sw_if_index,
+ 'persistent_keepalive': self.persistent_keepalive})
+ self.index = rv.peer_index
+ self.receiver_index = self.index + 1
+ self._test.registry.register(self, self._test.logger)
+ self.validate_routing()
+ return self
+
+ def remove_vpp_config(self):
+ self._test.vapi.wireguard_peer_remove(peer_index=self.index)
+ self.validate_no_routing()
+
+ def object_id(self):
+ return ("wireguard-peer-%s" % self.index)
+
+ def public_key_bytes(self):
+ return public_key_bytes(self.public_key)
+
+ def query_vpp_config(self):
+ peers = self._test.vapi.wireguard_peers_dump()
+
+ for p in peers:
+ if p.peer.public_key == self.public_key_bytes() and \
+ p.peer.port == self.port and \
+ str(p.peer.endpoint) == self.endpoint and \
+ p.peer.sw_if_index == self.itf.sw_if_index and \
+ len(self.allowed_ips) == p.peer.n_allowed_ips:
+ self.allowed_ips.sort()
+ p.peer.allowed_ips.sort()
+
+ for (a1, a2) in zip(self.allowed_ips, p.peer.allowed_ips):
+ if str(a1) != str(a2):
+ return False
+ return True
+ return False
+
+ def set_responder(self):
+ self.noise.set_as_responder()
+
+ def mk_tunnel_header(self, tx_itf):
+ return (Ether(dst=tx_itf.local_mac, src=tx_itf.remote_mac) /
+ IP(src=self.endpoint, dst=self.itf.src) /
+ UDP(sport=self.port, dport=self.itf.port))
+
+ def noise_init(self, public_key=None):
+ self.noise.set_prologue(NOISE_IDENTIFIER_NAME)
+ self.noise.set_psks(psk=bytes(bytearray(32)))
+
+ if not public_key:
+ public_key = self.itf.public_key
+
+ # local/this private
+ self.noise.set_keypair_from_private_bytes(
+ Keypair.STATIC,
+ private_key_bytes(self.private_key))
+ # remote's public
+ self.noise.set_keypair_from_public_bytes(
+ Keypair.REMOTE_STATIC,
+ public_key_bytes(public_key))
+
+ self.noise.start_handshake()
+
+ def mk_handshake(self, tx_itf, public_key=None):
+ self.noise.set_as_initiator()
+ self.noise_init(public_key)
+
+ p = (Wireguard() / WireguardInitiation())
+
+ p[Wireguard].message_type = 1
+ p[Wireguard].reserved_zero = 0
+ p[WireguardInitiation].sender_index = self.receiver_index
+
+ # some random data for the message
+ # lifted from the noise protocol's wireguard example
+ now = datetime.datetime.now()
+ tai = struct.pack('!qi', 4611686018427387914 + int(now.timestamp()),
+ int(now.microsecond * 1e3))
+ b = self.noise.write_message(payload=tai)
+
+ # load noise into init message
+ p[WireguardInitiation].unencrypted_ephemeral = b[0:32]
+ p[WireguardInitiation].encrypted_static = b[32:80]
+ p[WireguardInitiation].encrypted_timestamp = b[80:108]
+
+ # generate the mac1 hash
+ mac_key = blake2s(b'mac1----' +
+ self.itf.public_key_bytes()).digest()
+ p[WireguardInitiation].mac1 = blake2s(bytes(p)[0:116],
+ digest_size=16,
+ key=mac_key).digest()
+ p[WireguardInitiation].mac2 = bytearray(16)
+
+ p = (self.mk_tunnel_header(tx_itf) / p)
+
+ return p
+
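+    # mac1 on handshake messages is keyed BLAKE2s-128 over everything that
+    # precedes the mac1 field, keyed with BLAKE2s-256 of the label
+    # "mac1----" concatenated with the responder's static public key.  A
+    # minimal sketch of that computation (a local illustration; the tests
+    # use the inline code above and below):
+    @staticmethod
+    def example_mac1(responder_static_public, msg_bytes_before_mac1):
+        mac_key = blake2s(b'mac1----' + responder_static_public).digest()
+        return blake2s(msg_bytes_before_mac1,
+                       digest_size=16,
+                       key=mac_key).digest()
+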
+ def verify_header(self, p):
+ self._test.assertEqual(p[IP].src, self.itf.src)
+ self._test.assertEqual(p[IP].dst, self.endpoint)
+ self._test.assertEqual(p[UDP].sport, self.itf.port)
+ self._test.assertEqual(p[UDP].dport, self.port)
+ self._test.assert_packet_checksums_valid(p)
+
+ def consume_init(self, p, tx_itf):
+ self.noise.set_as_responder()
+ self.noise_init(self.itf.public_key)
+ self.verify_header(p)
+
+ init = Wireguard(p[Raw])
+
+ self._test.assertEqual(init[Wireguard].message_type, 1)
+ self._test.assertEqual(init[Wireguard].reserved_zero, 0)
+
+ self.sender = init[WireguardInitiation].sender_index
+
+ # validate the hash
+ mac_key = blake2s(b'mac1----' +
+ public_key_bytes(self.public_key)).digest()
+ mac1 = blake2s(bytes(init)[0:-32],
+ digest_size=16,
+ key=mac_key).digest()
+ self._test.assertEqual(init[WireguardInitiation].mac1, mac1)
+
+        # this consumes only the unencrypted_ephemeral, encrypted_static
+        # and encrypted_timestamp fields of the init message
+ payload = self.noise.read_message(bytes(init)[8:-32])
+
+ # build the response
+ b = self.noise.write_message()
+ mac_key = blake2s(b'mac1----' +
+ public_key_bytes(self.itf.public_key)).digest()
+ resp = (Wireguard(message_type=2, reserved_zero=0) /
+ WireguardResponse(sender_index=self.receiver_index,
+ receiver_index=self.sender,
+ unencrypted_ephemeral=b[0:32],
+ encrypted_nothing=b[32:]))
+ mac1 = blake2s(bytes(resp)[:-32],
+ digest_size=16,
+ key=mac_key).digest()
+ resp[WireguardResponse].mac1 = mac1
+
+ resp = (self.mk_tunnel_header(tx_itf) / resp)
+ self._test.assertTrue(self.noise.handshake_finished)
+
+ return resp
+
+ def consume_response(self, p):
+ self.verify_header(p)
+
+ resp = Wireguard(p[Raw])
+
+ self._test.assertEqual(resp[Wireguard].message_type, 2)
+ self._test.assertEqual(resp[Wireguard].reserved_zero, 0)
+ self._test.assertEqual(resp[WireguardResponse].receiver_index,
+ self.receiver_index)
+
+ self.sender = resp[Wireguard].sender_index
+
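+ # skip the 12-byte header; the next 48 bytes are the unencrypted
+ # ephemeral (32) plus the AEAD tag of the empty payload (16)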
+ payload = self.noise.read_message(bytes(resp)[12:60])
+ self._test.assertEqual(payload, b'')
+ self._test.assertTrue(self.noise.handshake_finished)
+
+ def decrypt_transport(self, p):
+ self.verify_header(p)
+
+ p = Wireguard(p[Raw])
+ self._test.assertEqual(p[Wireguard].message_type, 4)
+ self._test.assertEqual(p[Wireguard].reserved_zero, 0)
+ self._test.assertEqual(p[WireguardTransport].receiver_index,
+ self.receiver_index)
+
+ d = self.noise.decrypt(
+ p[WireguardTransport].encrypted_encapsulated_packet)
+ return d
+
+ def encrypt_transport(self, p):
+ return self.noise.encrypt(bytes(p))
+
+ def validate_encapped(self, rxs, tx):
+ for rx in rxs:
+ rx = IP(self.decrypt_transport(rx))
+
+ # check the original packet is present
+ self._test.assertEqual(rx[IP].dst, tx[IP].dst)
+ self._test.assertEqual(rx[IP].ttl, tx[IP].ttl-1)
+
+
+class TestWg(VppTestCase):
+ """ Wireguard Test Case """
+
+ error_str = compile(r"Error")
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestWg, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces(range(3))
+ for i in cls.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ except Exception:
+ super(TestWg, cls).tearDownClass()
+ raise
+
+ @classmethod
+ def tearDownClass(cls):
+ super(TestWg, cls).tearDownClass()
+
+ def test_wg_interface(self):
+ """ Simple interface creation """
+ port = 12312
+
+ # Create interface
+ wg0 = VppWgInterface(self,
+ self.pg1.local_ip4,
+ port).add_vpp_config()
+
+ self.logger.info(self.vapi.cli("sh int"))
+
+ # delete interface
+ wg0.remove_vpp_config()
+
+ def test_handshake_hash(self):
+ """ test hashing an init message """
+ # an init packet generated by Linux given the key below
+ h = "0100000098b9032b" \
+ "55cc4b39e73c3d24" \
+ "a2a1ab884b524a81" \
+ "1808bb86640fb70d" \
+ "e93154fec1879125" \
+ "ab012624a27f0b75" \
+ "c0a2582f438ddb5f" \
+ "8e768af40b4ab444" \
+ "02f9ff473e1b797e" \
+ "80d39d93c5480c82" \
+ "a3d4510f70396976" \
+ "586fb67300a5167b" \
+ "ae6ca3ff3dfd00eb" \
+ "59be198810f5aa03" \
+ "6abc243d2155ee4f" \
+ "2336483900aef801" \
+ "08752cd700000000" \
+ "0000000000000000" \
+ "00000000"
+
+ b = bytearray.fromhex(h)
+ tgt = Wireguard(b)
+
+ pubb = base64.b64decode("aRuHFTTxICIQNefp05oKWlJv3zgKxb8+WW7JJMh0jyM=")
+ pub = X25519PublicKey.from_public_bytes(pubb)
+
+ self.assertEqual(pubb, public_key_bytes(pub))
+
+ # strip the macs and build a new packet
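+ # recomputing mac1 with the peer's public key must reproduce the
+ # value carried in the Linux-generated packet; mac2 is left as zeros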
+ init = b[0:-32]
+ mac_key = blake2s(b'mac1----' + public_key_bytes(pub)).digest()
+ init += blake2s(init,
+ digest_size=16,
+ key=mac_key).digest()
+ init += b'\x00' * 16
+
+ act = Wireguard(init)
+
+ self.assertEqual(tgt, act)
+
+ def test_wg_peer_resp(self):
+ """ Send handshake response """
+ wg_output_node_name = '/err/wg-output-tun/'
+ wg_input_node_name = '/err/wg-input/'
+
+ port = 12323
+
+ # Create interfaces
+ wg0 = VppWgInterface(self,
+ self.pg1.local_ip4,
+ port).add_vpp_config()
+ wg0.admin_up()
+ wg0.config_ip4()
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ peer_1 = VppWgPeer(self,
+ wg0,
+ self.pg1.remote_ip4,
+ port+1,
+ ["10.11.2.0/24",
+ "10.11.3.0/24"]).add_vpp_config()
+ self.assertEqual(len(self.vapi.wireguard_peers_dump()), 1)
+
+ # wait for the peer to send a handshake
+ rx = self.pg1.get_capture(1, timeout=2)
+
+ # consume the handshake in the noise protocol and
+ # generate the response
+ resp = peer_1.consume_init(rx[0], self.pg1)
+
+ # send the response, get keepalive
+ rxs = self.send_and_expect(self.pg1, [resp], self.pg1)
+
+ for rx in rxs:
+ b = peer_1.decrypt_transport(rx)
+ self.assertEqual(0, len(b))
+
+ # send packets that are routed into the tunnel
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="10.11.3.2") /
+ UDP(sport=555, dport=556) /
+ Raw(b'\x00' * 80))
+
+ rxs = self.send_and_expect(self.pg0, p * 255, self.pg1)
+
+ peer_1.validate_encapped(rxs, p)
+
+ # send packets into the tunnel, expect to receive them on
+ # the other side
+ p = [(peer_1.mk_tunnel_header(self.pg1) /
+ Wireguard(message_type=4, reserved_zero=0) /
+ WireguardTransport(
+ receiver_index=peer_1.sender,
+ counter=ii,
+ encrypted_encapsulated_packet=peer_1.encrypt_transport(
+ (IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20) /
+ UDP(sport=222, dport=223) /
+ Raw())))) for ii in range(255)]
+
+ rxs = self.send_and_expect(self.pg1, p, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].ttl, 19)
+
+ def test_wg_peer_init(self):
+ """ Send handshake init """
+ wg_output_node_name = '/err/wg-output-tun/'
+ wg_input_node_name = '/err/wg-input/'
+
+ port = 12333
+
+ # Create interfaces
+ wg0 = VppWgInterface(self,
+ self.pg1.local_ip4,
+ port).add_vpp_config()
+ wg0.admin_up()
+ wg0.config_ip4()
+
+ peer_1 = VppWgPeer(self,
+ wg0,
+ self.pg1.remote_ip4,
+ port+1,
+ ["10.11.2.0/24",
+ "10.11.3.0/24"]).add_vpp_config()
+ self.assertEqual(len(self.vapi.wireguard_peers_dump()), 1)
+
+ # route a packet into the wg interface
+ # use the allowed-ip prefix
+ # this is dropped because the peer is not initiated
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="10.11.3.2") /
+ UDP(sport=555, dport=556) /
+ Raw())
+ self.send_and_assert_no_replies(self.pg0, [p])
+
+ kp_error = wg_output_node_name + "Keypair error"
+ self.assertEqual(1, self.statistics.get_err_counter(kp_error))
+
+ # send a handshake from the peer with an invalid MAC
+ p = peer_1.mk_handshake(self.pg1)
+ p[WireguardInitiation].mac1 = b'foobar'
+ self.send_and_assert_no_replies(self.pg1, [p])
+ self.assertEqual(1, self.statistics.get_err_counter(
+ wg_input_node_name + "Invalid MAC handshake"))
+
+ # send a handshake from the peer but signed by the wrong key.
+ p = peer_1.mk_handshake(self.pg1,
+ X25519PrivateKey.generate().public_key())
+ self.send_and_assert_no_replies(self.pg1, [p])
+ self.assertEqual(1, self.statistics.get_err_counter(
+ wg_input_node_name + "Peer error"))
+
+ # send a valid handshake init for which we expect a response
+ p = peer_1.mk_handshake(self.pg1)
+
+ rx = self.send_and_expect(self.pg1, [p], self.pg1)
+
+ peer_1.consume_response(rx[0])
+
+ # route a packet into the wg interface
+ # this is dropped because the peer is still not initiated
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="10.11.3.2") /
+ UDP(sport=555, dport=556) /
+ Raw())
+ self.send_and_assert_no_replies(self.pg0, [p])
+ self.assertEqual(2, self.statistics.get_err_counter(kp_error))
+
+ # send a data packet from the peer through the tunnel
+ # this completes the handshake
+ p = (IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20) /
+ UDP(sport=222, dport=223) /
+ Raw())
+ d = peer_1.encrypt_transport(p)
+ p = (peer_1.mk_tunnel_header(self.pg1) /
+ (Wireguard(message_type=4, reserved_zero=0) /
+ WireguardTransport(receiver_index=peer_1.sender,
+ counter=0,
+ encrypted_encapsulated_packet=d)))
+ rxs = self.send_and_expect(self.pg1, [p], self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].ttl, 19)
+
+ # send packets that are routed into the tunnel
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="10.11.3.2") /
+ UDP(sport=555, dport=556) /
+ Raw(b'\x00' * 80))
+
+ rxs = self.send_and_expect(self.pg0, p * 255, self.pg1)
+
+ for rx in rxs:
+ rx = IP(peer_1.decrypt_transport(rx))
+
+ # check the original packet is present
+ self.assertEqual(rx[IP].dst, p[IP].dst)
+ self.assertEqual(rx[IP].ttl, p[IP].ttl-1)
+
+ # send packets into the tunnel, expect to receive them on
+ # the other side
+ p = [(peer_1.mk_tunnel_header(self.pg1) /
+ Wireguard(message_type=4, reserved_zero=0) /
+ WireguardTransport(
+ receiver_index=peer_1.sender,
+ counter=ii+1,
+ encrypted_encapsulated_packet=peer_1.encrypt_transport(
+ (IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20) /
+ UDP(sport=222, dport=223) /
+ Raw())))) for ii in range(255)]
+
+ rxs = self.send_and_expect(self.pg1, p, self.pg0)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].ttl, 19)
+
+ peer_1.remove_vpp_config()
+ wg0.remove_vpp_config()
+
+ def test_wg_multi_peer(self):
+ """ multiple peer setup """
+ port = 12343
+
+ # Create interfaces
+ wg0 = VppWgInterface(self,
+ self.pg1.local_ip4,
+ port).add_vpp_config()
+ wg1 = VppWgInterface(self,
+ self.pg2.local_ip4,
+ port+1).add_vpp_config()
+ wg0.admin_up()
+ wg1.admin_up()
+
+ # Check peer counter
+ self.assertEqual(len(self.vapi.wireguard_peers_dump()), 0)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Create many peers on second interface
+ NUM_PEERS = 16
+ self.pg2.generate_remote_hosts(NUM_PEERS)
+ self.pg2.configure_ipv4_neighbors()
+ self.pg1.generate_remote_hosts(NUM_PEERS)
+ self.pg1.configure_ipv4_neighbors()
+
+ peers_1 = []
+ peers_2 = []
+ for i in range(NUM_PEERS):
+ peers_1.append(VppWgPeer(self,
+ wg0,
+ self.pg1.remote_hosts[i].ip4,
+ port+1+i,
+ ["10.0.%d.4/32" % i]).add_vpp_config())
+ peers_2.append(VppWgPeer(self,
+ wg1,
+ self.pg2.remote_hosts[i].ip4,
+ port+100+i,
+ ["10.100.%d.4/32" % i]).add_vpp_config())
+
+ self.assertEqual(len(self.vapi.wireguard_peers_dump()), NUM_PEERS*2)
+
+ self.logger.info(self.vapi.cli("show wireguard peer"))
+ self.logger.info(self.vapi.cli("show wireguard interface"))
+ self.logger.info(self.vapi.cli("show adj 37"))
+ self.logger.info(self.vapi.cli("sh ip fib 172.16.3.17"))
+ self.logger.info(self.vapi.cli("sh ip fib 10.11.3.0"))
+
+ # remove peers
+ for p in peers_1:
+ self.assertTrue(p.query_vpp_config())
+ p.remove_vpp_config()
+ for p in peers_2:
+ self.assertTrue(p.query_vpp_config())
+ p.remove_vpp_config()
+
+ wg0.remove_vpp_config()
+ wg1.remove_vpp_config()
+
+
+class WireguardHandoffTests(TestWg):
+ """ Wireguard Tests in multi-worker setup """
+ vpp_worker_count = 2
+
+ def test_wg_peer_init(self):
+ """ Handoff """
+ wg_output_node_name = '/err/wg-output-tun/'
+ wg_input_node_name = '/err/wg-input/'
+
+ port = 12353
+
+ # Create interfaces
+ wg0 = VppWgInterface(self,
+ self.pg1.local_ip4,
+ port).add_vpp_config()
+ wg0.admin_up()
+ wg0.config_ip4()
+
+ peer_1 = VppWgPeer(self,
+ wg0,
+ self.pg1.remote_ip4,
+ port+1,
+ ["10.11.2.0/24",
+ "10.11.3.0/24"]).add_vpp_config()
+ self.assertEqual(len(self.vapi.wireguard_peers_dump()), 1)
+
+ # send a valid handshake init for which we expect a response
+ p = peer_1.mk_handshake(self.pg1)
+
+ rx = self.send_and_expect(self.pg1, [p], self.pg1)
+
+ peer_1.consume_response(rx[0])
+
+ # send a data packet from the peer through the tunnel
+ # this completes the handshake and pins the peer to worker 0
+ p = (IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20) /
+ UDP(sport=222, dport=223) /
+ Raw())
+ d = peer_1.encrypt_transport(p)
+ p = (peer_1.mk_tunnel_header(self.pg1) /
+ (Wireguard(message_type=4, reserved_zero=0) /
+ WireguardTransport(receiver_index=peer_1.sender,
+ counter=0,
+ encrypted_encapsulated_packet=d)))
+ rxs = self.send_and_expect(self.pg1, [p], self.pg0,
+ worker=0)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].ttl, 19)
+
+ # send packets that are routed into the tunnel
+ # and pin the peer to worker 1
+ pe = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst="10.11.3.2") /
+ UDP(sport=555, dport=556) /
+ Raw(b'\x00' * 80))
+ rxs = self.send_and_expect(self.pg0, pe * 255, self.pg1, worker=1)
+ peer_1.validate_encapped(rxs, pe)
+
+ # send packets into the tunnel, from the other worker
+ p = [(peer_1.mk_tunnel_header(self.pg1) /
+ Wireguard(message_type=4, reserved_zero=0) /
+ WireguardTransport(
+ receiver_index=peer_1.sender,
+ counter=ii+1,
+ encrypted_encapsulated_packet=peer_1.encrypt_transport(
+ (IP(src="10.11.3.1", dst=self.pg0.remote_ip4, ttl=20) /
+ UDP(sport=222, dport=223) /
+ Raw())))) for ii in range(255)]
+
+ rxs = self.send_and_expect(self.pg1, p, self.pg0, worker=1)
+
+ for rx in rxs:
+ self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
+ self.assertEqual(rx[IP].ttl, 19)
+
+ # send packets that are routed into the tunnel
+ # from worker 0
+ rxs = self.send_and_expect(self.pg0, pe * 255, self.pg1, worker=0)
+
+ peer_1.validate_encapped(rxs, pe)
+
+ peer_1.remove_vpp_config()
+ wg0.remove_vpp_config()
diff --git a/test/vpp_acl.py b/test/vpp_acl.py
new file mode 100644
index 00000000000..2d2f7ca257b
--- /dev/null
+++ b/test/vpp_acl.py
@@ -0,0 +1,476 @@
+from ipaddress import IPv4Network
+
+from vpp_object import VppObject
+from vpp_papi import VppEnum
+from vpp_ip import INVALID_INDEX
+from vpp_papi_provider import UnexpectedApiReturnValueError
+
+
+class VppAclPlugin(VppObject):
+
+ def __init__(self, test, enable_intf_counters=False):
+ self._test = test
+ self.enable_intf_counters = enable_intf_counters
+
+ @property
+ def enable_intf_counters(self):
+ return self._enable_intf_counters
+
+ @enable_intf_counters.setter
+ def enable_intf_counters(self, enable):
+ self._enable_intf_counters = enable
+ self._test.vapi.acl_stats_intf_counters_enable(enable=enable)
+
+ def add_vpp_config(self):
+ pass
+
+ def remove_vpp_config(self):
+ pass
+
+ def query_vpp_config(self):
+ pass
+
+ def object_id(self):
+ return ("acl-plugin-%d" % (self._sw_if_index))
+
+
+class AclRule():
+ """ ACL Rule """
+
+ # port ranges
+ PORTS_ALL = -1
+ PORTS_RANGE = 0
+ PORTS_RANGE_2 = 1
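+ # PORTS_ALL matches the full 0-65535 range; PORTS_RANGE and PORTS_RANGE_2
+ # select the per-protocol from/to values below; any other value is used as
+ # a single exact port. For ICMP/ICMPv6 the port fields carry type and code.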
+ udp_sport_from = 10
+ udp_sport_to = udp_sport_from + 5
+ udp_dport_from = 20000
+ udp_dport_to = udp_dport_from + 5000
+ tcp_sport_from = 30
+ tcp_sport_to = tcp_sport_from + 5
+ tcp_dport_from = 40000
+ tcp_dport_to = tcp_dport_from + 5000
+
+ udp_sport_from_2 = 90
+ udp_sport_to_2 = udp_sport_from_2 + 5
+ udp_dport_from_2 = 30000
+ udp_dport_to_2 = udp_dport_from_2 + 5000
+ tcp_sport_from_2 = 130
+ tcp_sport_to_2 = tcp_sport_from_2 + 5
+ tcp_dport_from_2 = 20000
+ tcp_dport_to_2 = tcp_dport_from_2 + 5000
+
+ icmp4_type = 8 # echo request
+ icmp4_code = 3
+ icmp6_type = 128 # echo request
+ icmp6_code = 3
+
+ icmp4_type_2 = 8
+ icmp4_code_from_2 = 5
+ icmp4_code_to_2 = 20
+ icmp6_type_2 = 128
+ icmp6_code_from_2 = 8
+ icmp6_code_to_2 = 42
+
+ def __init__(self, is_permit, src_prefix=IPv4Network('0.0.0.0/0'),
+ dst_prefix=IPv4Network('0.0.0.0/0'),
+ proto=0, ports=PORTS_ALL, sport_from=None, sport_to=None,
+ dport_from=None, dport_to=None):
+ self.is_permit = is_permit
+ self.src_prefix = src_prefix
+ self.dst_prefix = dst_prefix
+ self._proto = proto
+ self._ports = ports
+ # assign ports by range
+ self.update_ports()
+ # assign specified ports
+ if sport_from:
+ self.sport_from = sport_from
+ if sport_to:
+ self.sport_to = sport_to
+ if dport_from:
+ self.dport_from = dport_from
+ if dport_to:
+ self.dport_to = dport_to
+
+ def __copy__(self):
+ new_rule = AclRule(self.is_permit, self.src_prefix, self.dst_prefix,
+ self._proto, self._ports, self.sport_from,
+ self.sport_to, self.dport_from, self.dport_to)
+ return new_rule
+
+ def update_ports(self):
+ if self._ports == self.PORTS_ALL:
+ self.sport_from = 0
+ self.dport_from = 0
+ self.sport_to = 65535
+ if self._proto == 1 or self._proto == 58:
+ self.sport_to = 255
+ self.dport_to = self.sport_to
+ elif self._ports == self.PORTS_RANGE:
+ if self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_ICMP:
+ self.sport_from = self.icmp4_type
+ self.sport_to = self.icmp4_type
+ self.dport_from = self.icmp4_code
+ self.dport_to = self.icmp4_code
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_ICMP6:
+ self.sport_from = self.icmp6_type
+ self.sport_to = self.icmp6_type
+ self.dport_from = self.icmp6_code
+ self.dport_to = self.icmp6_code
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_TCP:
+ self.sport_from = self.tcp_sport_from
+ self.sport_to = self.tcp_sport_to
+ self.dport_from = self.tcp_dport_from
+ self.dport_to = self.tcp_dport_to
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP:
+ self.sport_from = self.udp_sport_from
+ self.sport_to = self.udp_sport_to
+ self.dport_from = self.udp_dport_from
+ self.dport_to = self.udp_dport_to
+ elif self._ports == self.PORTS_RANGE_2:
+ if self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_ICMP:
+ self.sport_from = self.icmp4_type_2
+ self.sport_to = self.icmp4_type_2
+ self.dport_from = self.icmp4_code_from_2
+ self.dport_to = self.icmp4_code_to_2
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_ICMP6:
+ self.sport_from = self.icmp6_type_2
+ self.sport_to = self.icmp6_type_2
+ self.dport_from = self.icmp6_code_from_2
+ self.dport_to = self.icmp6_code_to_2
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_TCP:
+ self.sport_from = self.tcp_sport_from_2
+ self.sport_to = self.tcp_sport_to_2
+ self.dport_from = self.tcp_dport_from_2
+ self.dport_to = self.tcp_dport_to_2
+ elif self._proto == VppEnum.vl_api_ip_proto_t.IP_API_PROTO_UDP:
+ self.sport_from = self.udp_sport_from_2
+ self.sport_to = self.udp_sport_to_2
+ self.dport_from = self.udp_dport_from_2
+ self.dport_to = self.udp_dport_to_2
+ else:
+ self.sport_from = self._ports
+ self.sport_to = self._ports
+ self.dport_from = self._ports
+ self.dport_to = self._ports
+
+ @property
+ def proto(self):
+ return self._proto
+
+ @proto.setter
+ def proto(self, proto):
+ self._proto = proto
+ self.update_ports()
+
+ @property
+ def ports(self):
+ return self._ports
+
+ @ports.setter
+ def ports(self, ports):
+ self._ports = ports
+ self.update_ports()
+
+ def encode(self):
+ return {'is_permit': self.is_permit, 'proto': self.proto,
+ 'srcport_or_icmptype_first': self.sport_from,
+ 'srcport_or_icmptype_last': self.sport_to,
+ 'src_prefix': self.src_prefix,
+ 'dstport_or_icmpcode_first': self.dport_from,
+ 'dstport_or_icmpcode_last': self.dport_to,
+ 'dst_prefix': self.dst_prefix}
+
+
+class VppAcl(VppObject):
+ """ VPP ACL """
+
+ def __init__(self, test, rules, acl_index=INVALID_INDEX, tag=None):
+ self._test = test
+ self._acl_index = acl_index
+ self.tag = tag
+ self._rules = rules
+
+ @property
+ def rules(self):
+ return self._rules
+
+ @property
+ def acl_index(self):
+ return self._acl_index
+
+ @property
+ def count(self):
+ return len(self._rules)
+
+ def encode_rules(self):
+ rules = []
+ for rule in self._rules:
+ rules.append(rule.encode())
+ return rules
+
+ def add_vpp_config(self, expect_error=False):
+ try:
+ reply = self._test.vapi.acl_add_replace(
+ acl_index=self._acl_index, tag=self.tag, count=self.count,
+ r=self.encode_rules())
+ self._acl_index = reply.acl_index
+ self._test.registry.register(self, self._test.logger)
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ return self
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+ return None
+
+ def modify_vpp_config(self, rules):
+ self._rules = rules
+ self.add_vpp_config()
+
+ def remove_vpp_config(self, expect_error=False):
+ try:
+ self._test.vapi.acl_del(acl_index=self._acl_index)
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+
+ def dump(self):
+ return self._test.vapi.acl_dump(acl_index=self._acl_index)
+
+ def query_vpp_config(self):
+ dump = self.dump()
+ for rule in dump:
+ if rule.acl_index == self._acl_index:
+ return True
+ return False
+
+ def object_id(self):
+ return ("acl-%s-%d" % (self.tag, self._acl_index))
+
+
+class VppEtypeWhitelist(VppObject):
+ """ VPP Etype Whitelist """
+
+ def __init__(self, test, sw_if_index, whitelist, n_input=0):
+ self._test = test
+ self.whitelist = whitelist
+ self.n_input = n_input
+ self._sw_if_index = sw_if_index
+
+ @property
+ def sw_if_index(self):
+ return self._sw_if_index
+
+ @property
+ def count(self):
+ return len(self.whitelist)
+
+ def add_vpp_config(self):
+ self._test.vapi.acl_interface_set_etype_whitelist(
+ sw_if_index=self._sw_if_index, count=self.count,
+ n_input=self.n_input, whitelist=self.whitelist)
+ self._test.registry.register(self, self._test.logger)
+ return self
+
+ def remove_vpp_config(self):
+ self._test.vapi.acl_interface_set_etype_whitelist(
+ sw_if_index=self._sw_if_index, count=0, n_input=0, whitelist=[])
+
+ def query_vpp_config(self):
+ self._test.vapi.acl_interface_etype_whitelist_dump(
+ sw_if_index=self._sw_if_index)
+ return False
+
+ def object_id(self):
+ return ("acl-etype_wl-%d" % (self._sw_if_index))
+
+
+class VppAclInterface(VppObject):
+ """ VPP ACL Interface """
+
+ def __init__(self, test, sw_if_index, acls, n_input=0):
+ self._test = test
+ self._sw_if_index = sw_if_index
+ self.n_input = n_input
+ self.acls = acls
+
+ @property
+ def sw_if_index(self):
+ return self._sw_if_index
+
+ @property
+ def count(self):
+ return len(self.acls)
+
+ def encode_acls(self):
+ acls = []
+ for acl in self.acls:
+ acls.append(acl.acl_index)
+ return acls
+
+ def add_vpp_config(self, expect_error=False):
+ try:
+ reply = self._test.vapi.acl_interface_set_acl_list(
+ sw_if_index=self._sw_if_index, n_input=self.n_input,
+ count=self.count, acls=self.encode_acls())
+ self._test.registry.register(self, self._test.logger)
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ return self
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+ return None
+
+ def remove_vpp_config(self, expect_error=False):
+ try:
+ reply = self._test.vapi.acl_interface_set_acl_list(
+ sw_if_index=self._sw_if_index, n_input=0, count=0, acls=[])
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+
+ def query_vpp_config(self):
+ dump = self._test.vapi.acl_interface_list_dump(
+ sw_if_index=self._sw_if_index)
+ for acl_list in dump:
+ if acl_list.count > 0:
+ return True
+ return False
+
+ def object_id(self):
+ return ("acl-if-list-%d" % (self._sw_if_index))
+
+
+class MacipRule():
+ """ Mac Ip rule """
+
+ def __init__(self, is_permit, src_mac=0, src_mac_mask=0,
+ src_prefix=IPv4Network('0.0.0.0/0')):
+ self.is_permit = is_permit
+ self.src_mac = src_mac
+ self.src_mac_mask = src_mac_mask
+ self.src_prefix = src_prefix
+
+ def encode(self):
+ return {'is_permit': self.is_permit, 'src_mac': self.src_mac,
+ 'src_mac_mask': self.src_mac_mask,
+ 'src_prefix': self.src_prefix}
+
+
+class VppMacipAcl(VppObject):
+ """ Vpp Mac Ip ACL """
+
+ def __init__(self, test, rules, acl_index=INVALID_INDEX, tag=None):
+ self._test = test
+ self._acl_index = acl_index
+ self.tag = tag
+ self._rules = rules
+
+ @property
+ def acl_index(self):
+ return self._acl_index
+
+ @property
+ def rules(self):
+ return self._rules
+
+ @property
+ def count(self):
+ return len(self._rules)
+
+ def encode_rules(self):
+ rules = []
+ for rule in self._rules:
+ rules.append(rule.encode())
+ return rules
+
+ def add_vpp_config(self, expect_error=False):
+ try:
+ reply = self._test.vapi.macip_acl_add_replace(
+ acl_index=self._acl_index, tag=self.tag, count=self.count,
+ r=self.encode_rules())
+ self._acl_index = reply.acl_index
+ self._test.registry.register(self, self._test.logger)
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ return self
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+ return None
+
+ def modify_vpp_config(self, rules):
+ self._rules = rules
+ self.add_vpp_config()
+
+ def remove_vpp_config(self, expect_error=False):
+ try:
+ self._test.vapi.macip_acl_del(acl_index=self._acl_index)
+ if expect_error:
+ self._test.fail("Unexpected api reply")
+ except UnexpectedApiReturnValueError:
+ if not expect_error:
+ self._test.fail("Unexpected api reply")
+
+ def dump(self):
+ return self._test.vapi.macip_acl_dump(acl_index=self._acl_index)
+
+ def query_vpp_config(self):
+ dump = self.dump()
+ for rule in dump:
+ if rule.acl_index == self._acl_index:
+ return True
+ return False
+
+ def object_id(self):
+ return ("macip-acl-%s-%d" % (self.tag, self._acl_index))
+
+
+class VppMacipAclInterface(VppObject):
+ """ VPP Mac Ip ACL Interface """
+
+ def __init__(self, test, sw_if_index, acls):
+ self._test = test
+ self._sw_if_index = sw_if_index
+ self.acls = acls
+
+ @property
+ def sw_if_index(self):
+ return self._sw_if_index
+
+ @property
+ def count(self):
+ return len(self.acls)
+
+ def add_vpp_config(self):
+ for acl in self.acls:
+ self._test.vapi.macip_acl_interface_add_del(
+ is_add=True, sw_if_index=self._sw_if_index,
+ acl_index=acl.acl_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ for acl in self.acls:
+ self._test.vapi.macip_acl_interface_add_del(
+ is_add=False, sw_if_index=self._sw_if_index,
+ acl_index=acl.acl_index)
+
+ def dump(self):
+ return self._test.vapi.macip_acl_interface_list_dump(
+ sw_if_index=self._sw_if_index)
+
+ def query_vpp_config(self):
+ dump = self.dump()
+ for acl_list in dump:
+ for acl_index in acl_list.acls:
+ if acl_index != INVALID_INDEX:
+ return True
+ return False
+
+ def object_id(self):
+ return ("macip-acl-if-list-%d" % (self._sw_if_index))
diff --git a/test/vpp_bier.py b/test/vpp_bier.py
new file mode 100644
index 00000000000..6e087a8ee0b
--- /dev/null
+++ b/test/vpp_bier.py
@@ -0,0 +1,293 @@
+"""
+ BIER Tables and Routes
+"""
+
+import socket
+from vpp_object import VppObject
+from vpp_ip_route import MPLS_LABEL_INVALID, VppRoutePath, VppMplsLabel
+
+
+class BIER_HDR_PAYLOAD:
+ BIER_HDR_PROTO_MPLS_DOWN_STREAM = 1
+ BIER_HDR_PROTO_MPLS_UP_STREAM = 2
+ BIER_HDR_PROTO_ETHERNET = 3
+ BIER_HDR_PROTO_IPV4 = 4
+ BIER_HDR_PROTO_IPV6 = 5
+ BIER_HDR_PROTO_VXLAN = 6
+ BIER_HDR_PROTO_CTRL = 7
+ BIER_HDR_PROTO_OAM = 8
+
+
+class VppBierTableID():
+ def __init__(self, sub_domain_id, set_id, hdr_len_id):
+ self.set_id = set_id
+ self.sub_domain_id = sub_domain_id
+ self.hdr_len_id = hdr_len_id
+
+
+def find_bier_table(test, bti):
+ tables = test.vapi.bier_table_dump()
+ for t in tables:
+ if bti.set_id == t.bt_tbl_id.bt_set \
+ and bti.sub_domain_id == t.bt_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == t.bt_tbl_id.bt_hdr_len_id:
+ return True
+ return False
+
+
+def find_bier_route(test, bti, bp):
+ routes = test.vapi.bier_route_dump(bti)
+ for r in routes:
+ if bti.set_id == r.br_route.br_tbl_id.bt_set \
+ and bti.sub_domain_id == r.br_route.br_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == r.br_route.br_tbl_id.bt_hdr_len_id \
+ and bp == r.br_route.br_bp:
+ return True
+ return False
+
+
+def find_bier_disp_table(test, bdti):
+ tables = test.vapi.bier_disp_table_dump()
+ for t in tables:
+ if bdti == t.bdt_tbl_id:
+ return True
+ return False
+
+
+def find_bier_disp_entry(test, bdti, bp):
+ entries = test.vapi.bier_disp_entry_dump(bdti)
+ for e in entries:
+ if bp == e.bde_bp \
+ and bdti == e.bde_tbl_id:
+ return True
+ return False
+
+
+def find_bier_imp(test, bti, bp):
+ imps = test.vapi.bier_imp_dump()
+ for i in imps:
+ if bti.set_id == i.bi_tbl_id.bt_set \
+ and bti.sub_domain_id == i.bi_tbl_id.bt_sub_domain \
+ and bti.hdr_len_id == i.bi_tbl_id.bt_hdr_len_id \
+ and bp == i.bi_src:
+ return True
+ return False
+
+
+class VppBierTable(VppObject):
+ """
+ BIER Table
+ """
+
+ def __init__(self, test, id, mpls_label):
+ self._test = test
+ self.id = id
+ self.mpls_label = mpls_label
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_table_add_del(
+ self.id,
+ self.mpls_label,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-table;[%d:%d:%d]" % (self.id.set_id,
+ self.id.sub_domain_id,
+ self.id.hdr_len_id)
+
+ def query_vpp_config(self):
+ return find_bier_table(self._test, self.id)
+
+
+class VppBierRoute(VppObject):
+ """
+ BIER route
+ """
+
+ def __init__(self, test, tbl_id, bp, paths):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.bp = bp
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_add=0)
+
+ def update_paths(self, paths):
+ self.paths = paths
+ self.encoded_paths = []
+ for path in self.paths:
+ self.encoded_paths.append(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ self.encoded_paths,
+ is_replace=1)
+
+ def add_path(self, path):
+ self.encoded_paths.append(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [path.encode()],
+ is_add=1,
+ is_replace=0)
+ self.paths.append(path)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_path(self, path):
+ self.encoded_paths.remove(path.encode())
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [path.encode()],
+ is_add=0,
+ is_replace=0)
+ self.paths.remove(path)
+
+ def remove_all_paths(self):
+ self._test.vapi.bier_route_add_del(
+ self.tbl_id,
+ self.bp,
+ [],
+ is_add=0,
+ is_replace=1)
+ self.paths = []
+
+ def object_id(self):
+ return "bier-route;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_route(self._test, self.tbl_id, self.bp)
+
+
+class VppBierImp(VppObject):
+ """
+ BIER Imposition
+ """
+
+ def __init__(self, test, tbl_id, src, ibytes):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.ibytes = ibytes
+ self.src = src
+
+ def add_vpp_config(self):
+ res = self._test.vapi.bier_imp_add(
+ self.tbl_id,
+ self.src,
+ self.ibytes)
+ self.bi_index = res.bi_index
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_imp_del(
+ self.bi_index)
+
+ def object_id(self):
+ return "bier-imp;[%d:%d:%d:%d]" % (self.tbl_id.set_id,
+ self.tbl_id.sub_domain_id,
+ self.tbl_id.hdr_len_id,
+ self.src)
+
+ def query_vpp_config(self):
+ return find_bier_imp(self._test, self.tbl_id, self.src)
+
+
+class VppBierDispTable(VppObject):
+ """
+ BIER Disposition Table
+ """
+
+ def __init__(self, test, id):
+ self._test = test
+ self.id = id
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_table_add_del(
+ self.id,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-disp-table;[%d]" % (self.id)
+
+ def query_vpp_config(self):
+ return find_bier_disp_table(self._test, self.id)
+
+
+class VppBierDispEntry(VppObject):
+ """
+ BIER Disposition Entry
+ """
+
+ def __init__(self, test, tbl_id, bp, payload_proto, nh_proto,
+ nh, nh_tbl, rpf_id=~0):
+ self._test = test
+ self.tbl_id = tbl_id
+ self.nh_tbl = nh_tbl
+ self.nh_proto = nh_proto
+ self.bp = bp
+ self.payload_proto = payload_proto
+ self.rpf_id = rpf_id
+ self.nh = socket.inet_pton(socket.AF_INET, nh)
+
+ def add_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=1)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.bier_disp_entry_add_del(
+ self.tbl_id,
+ self.bp,
+ self.payload_proto,
+ self.nh_proto,
+ self.nh,
+ self.nh_tbl,
+ self.rpf_id,
+ is_add=0)
+
+ def object_id(self):
+ return "bier-disp-entry;[%d:%d]" % (self.tbl_id,
+ self.bp)
+
+ def query_vpp_config(self):
+ return find_bier_disp_entry(self._test, self.tbl_id, self.bp)
diff --git a/test/vpp_bond_interface.py b/test/vpp_bond_interface.py
new file mode 100644
index 00000000000..60c1ac1557b
--- /dev/null
+++ b/test/vpp_bond_interface.py
@@ -0,0 +1,52 @@
+from vpp_object import VppObject
+from vpp_interface import VppInterface
+
+
+class VppBondInterface(VppInterface):
+ """VPP bond interface."""
+
+ def __init__(self, test, mode, lb=0, numa_only=0, enable_gso=0,
+ use_custom_mac=0, mac_address='', id=0xFFFFFFFF):
+
+ """ Create VPP Bond interface """
+ super(VppBondInterface, self).__init__(test)
+ self.mode = mode
+ self.lb = lb
+ self.numa_only = numa_only
+ self.enable_gso = enable_gso
+ self.use_custom_mac = use_custom_mac
+ self.mac_address = mac_address
+ self.id = id
+
+ def add_vpp_config(self):
+ r = self.test.vapi.bond_create2(self.mode,
+ self.lb,
+ self.numa_only,
+ self.enable_gso,
+ self.use_custom_mac,
+ self.mac_address,
+ self.id)
+ self.set_sw_if_index(r.sw_if_index)
+
+ def remove_vpp_config(self):
+ self.test.vapi.bond_delete(self.sw_if_index)
+
+ def add_member_vpp_bond_interface(self,
+ sw_if_index,
+ is_passive=0,
+ is_long_timeout=0):
+ self.test.vapi.bond_add_member(sw_if_index,
+ self.sw_if_index,
+ is_passive,
+ is_long_timeout)
+
+ def detach_vpp_bond_interface(self,
+ sw_if_index):
+ self.test.vapi.bond_detach_member(sw_if_index)
+
+ def is_interface_config_in_dump(self, dump):
+ for i in dump:
+ if i.sw_if_index == self.sw_if_index:
+ return True
+ return False
diff --git a/test/vpp_dhcp.py b/test/vpp_dhcp.py
new file mode 100644
index 00000000000..f8265a26252
--- /dev/null
+++ b/test/vpp_dhcp.py
@@ -0,0 +1,131 @@
+from vpp_object import VppObject
+
+
+class VppDHCPProxy(VppObject):
+
+ def __init__(
+ self,
+ test,
+ dhcp_server,
+ dhcp_src_address,
+ rx_vrf_id=0,
+ server_vrf_id=0,
+ ):
+ self._test = test
+ self._rx_vrf_id = rx_vrf_id
+ self._server_vrf_id = server_vrf_id
+ self._dhcp_server = dhcp_server
+ self._dhcp_src_address = dhcp_src_address
+
+ def set_proxy(
+ self,
+ dhcp_server,
+ dhcp_src_address,
+ rx_vrf_id=0,
+ server_vrf_id=0):
+ if self.query_vpp_config():
+ raise Exception('Vpp config present')
+ self._rx_vrf_id = rx_vrf_id
+ self._server_vrf_id = server_vrf_id
+ self._dhcp_server = dhcp_server
+ self._dhcp_src_address = dhcp_src_address
+
+ def add_vpp_config(self):
+ self._test.vapi.dhcp_proxy_config(
+ is_add=1,
+ rx_vrf_id=self._rx_vrf_id,
+ server_vrf_id=self._server_vrf_id,
+ dhcp_server=self._dhcp_server,
+ dhcp_src_address=self._dhcp_src_address)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.dhcp_proxy_config(
+ rx_vrf_id=self._rx_vrf_id,
+ server_vrf_id=self._server_vrf_id,
+ dhcp_server=self._dhcp_server,
+ dhcp_src_address=self._dhcp_src_address,
+ is_add=0)
+
+ def get_vpp_dump(self):
+ dump = self._test.vapi.dhcp_proxy_dump()
+ for entry in dump:
+ if entry.rx_vrf_id == self._rx_vrf_id:
+ return entry
+
+ def query_vpp_config(self):
+ dump = self.get_vpp_dump()
+ return True if dump else False
+
+ def object_id(self):
+ return "dhcp-proxy-%d" % self._rx_vrf_id
+
+
+class VppDHCPClient(VppObject):
+
+ def __init__(
+ self,
+ test,
+ sw_if_index,
+ hostname,
+ id=None,
+ want_dhcp_event=False,
+ set_broadcast_flag=True,
+ dscp=None,
+ pid=None):
+ self._test = test
+ self._sw_if_index = sw_if_index
+ self._hostname = hostname
+ self._id = id
+ self._want_dhcp_event = want_dhcp_event
+ self._set_broadcast_flag = set_broadcast_flag
+ self._dscp = dscp
+ self._pid = pid
+
+ def set_client(
+ self,
+ sw_if_index,
+ hostname,
+ id=None,
+ want_dhcp_event=False,
+ set_broadcast_flag=True,
+ dscp=None,
+ pid=None):
+ if self.query_vpp_config():
+ raise Exception('Vpp config present')
+ self._sw_if_index = sw_if_index
+ self._hostname = hostname
+ self._id = id
+ self._want_dhcp_event = want_dhcp_event
+ self._set_broadcast_flag = set_broadcast_flag
+ self._dscp = dscp
+ self._pid = pid
+
+ def add_vpp_config(self):
+ id = self._id.encode('ascii') if self._id else None
+ client = {'sw_if_index': self._sw_if_index, 'hostname': self._hostname,
+ 'id': id,
+ 'want_dhcp_event': self._want_dhcp_event,
+ 'set_broadcast_flag': self._set_broadcast_flag,
+ 'dscp': self._dscp, 'pid': self._pid}
+ self._test.vapi.dhcp_client_config(is_add=1, client=client)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ client = {
+ 'sw_if_index': self._sw_if_index,
+ 'hostname': self._hostname}
+ self._test.vapi.dhcp_client_config(client=client, is_add=0)
+
+ def get_vpp_dump(self):
+ dump = self._test.vapi.dhcp_client_dump()
+ for entry in dump:
+ if entry.client.sw_if_index == self._sw_if_index:
+ return entry
+
+ def query_vpp_config(self):
+ dump = self.get_vpp_dump()
+ return True if dump else False
+
+ def object_id(self):
+ return "dhcp-client-%s/%d" % (self._hostname, self._sw_if_index)
diff --git a/test/vpp_igmp.py b/test/vpp_igmp.py
new file mode 100644
index 00000000000..8f78a9b909a
--- /dev/null
+++ b/test/vpp_igmp.py
@@ -0,0 +1,75 @@
+
+from vpp_object import VppObject
+import socket
+
+
+class IGMP_MODE:
+ ROUTER = 0
+ HOST = 1
+
+
+class IGMP_FILTER:
+ INCLUDE = 1
+ EXCLUDE = 0
+
+
+def find_igmp_state(states, itf, gaddr, saddr):
+ for s in states:
+ if s.sw_if_index == itf.sw_if_index and \
+ str(s.gaddr) == gaddr and str(s.saddr) == saddr:
+ return True
+ return False
+
+
+def wait_for_igmp_event(test, timeout, itf, gaddr, saddr, ff):
+ ev = test.vapi.wait_for_event(timeout, "igmp_event")
+ if ev.sw_if_index == itf.sw_if_index and \
+ str(ev.gaddr) == gaddr and str(ev.saddr) == saddr and \
+ ev.filter == ff:
+ return True
+ return False
+
+
+class IgmpSG():
+ def __init__(self, gaddr, saddrs):
+ self.gaddr = gaddr
+ self.gaddr_p = socket.inet_pton(socket.AF_INET, gaddr)
+ self.saddrs = saddrs
+ self.saddrs_p = []
+ self.saddrs_encoded = []
+ for s in saddrs:
+ ss = socket.inet_pton(socket.AF_INET, s)
+ self.saddrs_p.append(ss)
+ self.saddrs_encoded.append(ss)
+
+
+class IgmpRecord():
+ def __init__(self, sg, type):
+ self.sg = sg
+ self.type = type
+
+
+class VppHostState(VppObject):
+ def __init__(self, test, filter, sw_if_index, sg):
+ self._test = test
+ self.sw_if_index = sw_if_index
+ self.filter = filter
+ self.sg = sg
+
+ def add_vpp_config(self):
+ self._test.vapi.igmp_listen(
+ self.filter, self.sw_if_index,
+ self.sg.saddrs_encoded, self.sg.gaddr_p)
+
+ def remove_vpp_config(self):
+ self._test.vapi.igmp_listen(
+ self.filter,
+ self.sw_if_index,
+ [],
+ self.sg.gaddr_p)
+
+ def object_id(self):
+ return "%s:%d" % (self.sg, self.sw_if_index)
+
+ def query_vpp_config(self):
+ return self._test.vapi.igmp_dump()
diff --git a/test/vpp_ikev2.py b/test/vpp_ikev2.py
new file mode 100644
index 00000000000..de2081268ee
--- /dev/null
+++ b/test/vpp_ikev2.py
@@ -0,0 +1,179 @@
+from ipaddress import IPv4Address, AddressValueError
+from vpp_object import VppObject
+from vpp_papi import VppEnum
+
+
+class AuthMethod:
+ v = {'rsa-sig': 1,
+ 'shared-key': 2}
+
+ @staticmethod
+ def value(key): return AuthMethod.v[key]
+
+
+class IDType:
+ v = {'ip4-addr': 1,
+ 'fqdn': 2,
+ 'ip6-addr': 5}
+
+ @staticmethod
+ def value(key): return IDType.v[key]
+
+
+class Profile(VppObject):
+ """ IKEv2 profile """
+ def __init__(self, test, profile_name):
+ self.test = test
+ self.vapi = test.vapi
+ self.profile_name = profile_name
+ self.udp_encap = False
+ self.natt = True
+
+ def disable_natt(self):
+ self.natt = False
+
+ def add_auth(self, method, data, is_hex=False):
+ if isinstance(method, int):
+ m = method
+ elif isinstance(method, str):
+ m = AuthMethod.value(method)
+ else:
+ raise Exception('unsupported type {}'.format(method))
+ self.auth = {'auth_method': m,
+ 'data': data,
+ 'is_hex': is_hex}
+
+ def add_local_id(self, id_type, data):
+ if isinstance(id_type, str):
+ t = IDType.value(id_type)
+ self.local_id = {'id_type': t,
+ 'data': data,
+ 'is_local': True}
+
+ def add_remote_id(self, id_type, data):
+ if isinstance(id_type, str):
+ t = IDType.value(id_type)
+ self.remote_id = {'id_type': t,
+ 'data': data,
+ 'is_local': False}
+
+ def add_local_ts(self, start_addr, end_addr, start_port=0, end_port=0xffff,
+ proto=0, is_ip4=True):
+ self.ts_is_ip4 = is_ip4
+ self.local_ts = {'is_local': True,
+ 'protocol_id': proto,
+ 'start_port': start_port,
+ 'end_port': end_port,
+ 'start_addr': start_addr,
+ 'end_addr': end_addr}
+
+ def add_remote_ts(self, start_addr, end_addr, start_port=0,
+ end_port=0xffff, proto=0):
+ try:
+ IPv4Address(start_addr)
+ is_ip4 = True
+ except AddressValueError:
+ is_ip4 = False
+ self.ts_is_ip4 = is_ip4
+ self.remote_ts = {'is_local': False,
+ 'protocol_id': proto,
+ 'start_port': start_port,
+ 'end_port': end_port,
+ 'start_addr': start_addr,
+ 'end_addr': end_addr}
+
+ def add_responder_hostname(self, hn):
+ self.responder_hostname = hn
+
+ def add_responder(self, responder):
+ self.responder = responder
+
+ def add_ike_transforms(self, tr):
+ self.ike_transforms = tr
+
+ def add_esp_transforms(self, tr):
+ self.esp_transforms = tr
+
+ def set_udp_encap(self, udp_encap):
+ self.udp_encap = udp_encap
+
+ def set_lifetime_data(self, data):
+ self.lifetime_data = data
+
+ def set_ipsec_over_udp_port(self, port):
+ self.ipsec_udp_port = {'is_set': 1,
+ 'port': port}
+
+ def set_tunnel_interface(self, sw_if_index):
+ self.tun_itf = sw_if_index
+
+ def object_id(self):
+ return 'ikev2-profile-%s' % self.profile_name
+
+ def remove_vpp_config(self):
+ self.vapi.ikev2_profile_add_del(name=self.profile_name, is_add=False)
+
+ def add_vpp_config(self):
+ self.vapi.ikev2_profile_add_del(name=self.profile_name, is_add=True)
+ if hasattr(self, 'auth'):
+ self.vapi.ikev2_profile_set_auth(name=self.profile_name,
+ data_len=len(self.auth['data']),
+ **self.auth)
+ if hasattr(self, 'local_id'):
+ self.vapi.ikev2_profile_set_id(name=self.profile_name,
+ data_len=len(self.local_id
+ ['data']),
+ **self.local_id)
+ if hasattr(self, 'remote_id'):
+ self.vapi.ikev2_profile_set_id(name=self.profile_name,
+ data_len=len(self.remote_id
+ ['data']),
+ **self.remote_id)
+ if hasattr(self, 'local_ts'):
+ self.vapi.ikev2_profile_set_ts(name=self.profile_name,
+ ts=self.local_ts)
+
+ if hasattr(self, 'remote_ts'):
+ self.vapi.ikev2_profile_set_ts(name=self.profile_name,
+ ts=self.remote_ts)
+
+ if hasattr(self, 'responder'):
+ self.vapi.ikev2_set_responder(name=self.profile_name,
+ responder=self.responder)
+
+ if hasattr(self, 'responder_hostname'):
+ self.vapi.ikev2_set_responder_hostname(name=self.profile_name,
+ **self.responder_hostname)
+
+ if hasattr(self, 'ike_transforms'):
+ self.vapi.ikev2_set_ike_transforms(name=self.profile_name,
+ tr=self.ike_transforms)
+
+ if hasattr(self, 'esp_transforms'):
+ self.vapi.ikev2_set_esp_transforms(name=self.profile_name,
+ tr=self.esp_transforms)
+
+ if self.udp_encap:
+ self.vapi.ikev2_profile_set_udp_encap(name=self.profile_name)
+
+ if hasattr(self, 'lifetime_data'):
+ self.vapi.ikev2_set_sa_lifetime(name=self.profile_name,
+ **self.lifetime_data)
+
+ if hasattr(self, 'ipsec_udp_port'):
+ self.vapi.ikev2_profile_set_ipsec_udp_port(name=self.profile_name,
+ **self.ipsec_udp_port)
+ if hasattr(self, 'tun_itf'):
+ self.vapi.ikev2_set_tunnel_interface(name=self.profile_name,
+ sw_if_index=self.tun_itf)
+
+ if not self.natt:
+ self.vapi.ikev2_profile_disable_natt(name=self.profile_name)
+
+ def query_vpp_config(self):
+ res = self.vapi.ikev2_profile_dump()
+ for r in res:
+ if r.profile.name == self.profile_name:
+ return r.profile
+ return None
diff --git a/test/vpp_lb.py b/test/vpp_lb.py
new file mode 100644
index 00000000000..d755cef70e5
--- /dev/null
+++ b/test/vpp_lb.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2019. Vinci Consulting Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import vpp_object
+
+
+class VppLbVip(vpp_object.VppObject):
+
+ def __init__(self, test, pfx, sfx, port, protocol):
+ self._test = test
+ self.pfx = pfx
+ self.sfx = sfx
+ self.port = port
+ self.protocol = protocol
+
+ def add_vpp_config(self):
+ self._test.vapi.lb_add_del_vip(pfx=self.pfx,
+ sfx=self.sfx,
+ port=self.port,
+ protocol=self.protocol)
+
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.lb_add_del_vip(pfx=self.pfx,
+ sfx=self.sfx,
+ port=self.port,
+ protocol=self.protocol,
+ is_del=1)
+
+ def query_vpp_config(self):
+ details = self._test.vapi.lb_add_del_vip(pfx=self.pfx,
+ sfx=self.sfx,
+ port=self.port,
+ protocol=self.protocol)
+ return True if self == details else False
+
+
+class VppLbAs(vpp_object.VppObject):
+ def __init__(self, test, pfx, port, protocol, app_srv, is_del, is_flush):
+ self._test = test
+ # this is the vip
+ self.pfx = pfx
+ self.port = port
+ self.protocol = protocol
+
+ self.app_srv = app_srv
+ self.is_del = is_del
+ self.is_flush = is_flush
+
+ def add_vpp_config(self):
+ self._test.vapi.lb_add_del_as(pfx=self.pfx,
+ port=self.port,
+ protocol=self.protocol,
+ app_srv=self.app_srv,
+ is_flush=self.is_flush,
+ )
+
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self._test.vapi.lb_add_del_as(pfx=self.pfx,
+ port=self.port,
+ protocol=self.protocol,
+ app_srv=self.app_srv,
+ is_flush=self.is_flush,
+ is_del=1)
+
+ def query_vpp_config(self):
+ details = self._test.vapi.lb_as_dump(pfx=self.pfx,
+ port=self.port,
+ protocol=self.protocol)
+ return True if self == details else False
diff --git a/test/vpp_memif.py b/test/vpp_memif.py
new file mode 100644
index 00000000000..226f8af72b5
--- /dev/null
+++ b/test/vpp_memif.py
@@ -0,0 +1,140 @@
+import socket
+from ipaddress import IPv4Network
+
+from vpp_object import VppObject
+from vpp_papi import VppEnum
+
+
+def get_if_dump(dump, sw_if_index):
+ for d in dump:
+ if (d.sw_if_index == sw_if_index):
+ return d
+
+
+def query_all_memif_vpp_config(_test):
+ return _test.vapi.memif_dump()
+
+
+def remove_all_memif_vpp_config(_test):
+ dump = _test.vapi.memif_dump()
+ for d in dump:
+ _test.vapi.memif_delete(d.sw_if_index)
+ dump = _test.vapi.memif_socket_filename_dump()
+ for d in dump:
+ if d.socket_id != 0:
+ _test.vapi.memif_socket_filename_add_del(
+ 0, d.socket_id, d.socket_filename)
+
+
+class VppSocketFilename(VppObject):
+ def __init__(self, test, socket_id, socket_filename,
+ add_default_folder=False):
+ self._test = test
+ self.socket_id = socket_id
+ self.socket_filename = socket_filename
+
+ # if True, prepend the default socket folder to the socket filename
+ # after adding the vpp config
+ self.add_default_folder = add_default_folder
+
+ def add_vpp_config(self):
+ rv = self._test.vapi.memif_socket_filename_add_del(
+ 1, self.socket_id, self.socket_filename)
+ if self.add_default_folder:
+ self.socket_filename = "%s/%s" % (self._test.tempdir,
+ self.socket_filename)
+ return rv
+
+ def remove_vpp_config(self):
+ return self._test.vapi.memif_socket_filename_add_del(
+ 0, self.socket_id, self.socket_filename)
+
+ def query_vpp_config(self):
+ return self._test.vapi.memif_socket_filename_dump()
+
+ def object_id(self):
+ return "socket-filename-%d-%s" % (self.socket_id, self.socket_filename)
+
+
+class VppMemif(VppObject):
+ def __init__(self, test, role, mode, rx_queues=0, tx_queues=0, if_id=0,
+ socket_id=0, secret="", ring_size=0, buffer_size=0,
+ hw_addr=""):
+ self._test = test
+ self.role = role
+ self.mode = mode
+ self.rx_queues = rx_queues
+ self.tx_queues = tx_queues
+ self.if_id = if_id
+ self.socket_id = socket_id
+ self.secret = secret
+ self.ring_size = ring_size
+ self.buffer_size = buffer_size
+ self.hw_addr = hw_addr
+ self.sw_if_index = None
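+ # give each memif a /24 derived from its id; the host part depends on
+ # the role so the two ends of a pair get distinct addresses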
+ self.ip_prefix = IPv4Network("192.168.%d.%d/24" %
+ (self.if_id + 1, self.role + 1),
+ strict=False)
+
+ def add_vpp_config(self):
+ rv = self._test.vapi.memif_create(
+ role=self.role,
+ mode=self.mode,
+ rx_queues=self.rx_queues,
+ tx_queues=self.tx_queues,
+ id=self.if_id,
+ socket_id=self.socket_id,
+ secret=self.secret,
+ ring_size=self.ring_size,
+ buffer_size=self.buffer_size,
+ hw_addr=self.hw_addr)
+ try:
+ self.sw_if_index = rv.sw_if_index
+ except AttributeError:
+ # rv doesn't have .sw_if_index attribute
+ raise AttributeError("%s %s" % (self, rv))
+
+ return self.sw_if_index
+
+ def admin_up(self):
+ if self.sw_if_index:
+ return self._test.vapi.sw_interface_set_flags(
+ sw_if_index=self.sw_if_index, flags=1)
+
+ def admin_down(self):
+ if self.sw_if_index:
+ return self._test.vapi.sw_interface_set_flags(
+ sw_if_index=self.sw_if_index, flags=0)
+
+ def wait_for_link_up(self, timeout, step=1):
+ if not self.sw_if_index:
+ return False
+ while True:
+ dump = self.query_vpp_config()
+ f = VppEnum.vl_api_if_status_flags_t.IF_STATUS_API_FLAG_LINK_UP
+ if dump.flags & f:
+ return True
+ self._test.sleep(step)
+ timeout -= step
+ if timeout <= 0:
+ return False
+
+ def config_ip4(self):
+ return self._test.vapi.sw_interface_add_del_address(
+ sw_if_index=self.sw_if_index, prefix=self.ip_prefix)
+
+ def remove_vpp_config(self):
+ self._test.vapi.memif_delete(self.sw_if_index)
+ self.sw_if_index = None
+
+ def query_vpp_config(self):
+ if not self.sw_if_index:
+ return None
+ dump = self._test.vapi.memif_dump()
+ return get_if_dump(dump, self.sw_if_index)
+
+ def object_id(self):
+ if self.sw_if_index:
+ return "%d:%d:%d" % (self.role, self.if_id, self.sw_if_index)
+ else:
+ return "%d:%d:None" % (self.role, self.if_id)
diff --git a/test/vpp_pppoe_interface.py b/test/vpp_pppoe_interface.py
new file mode 100644
index 00000000000..505ac4c6425
--- /dev/null
+++ b/test/vpp_pppoe_interface.py
@@ -0,0 +1,42 @@
+
+from vpp_interface import VppInterface
+import socket
+from vpp_papi import mac_pton
+
+
+class VppPppoeInterface(VppInterface):
+ """
+ VPP Pppoe interface
+ """
+
+ def __init__(self, test, client_ip, client_mac,
+ session_id, decap_vrf_id=0):
+ """ Create VPP PPPoE4 interface """
+ super(VppPppoeInterface, self).__init__(test)
+ self.client_ip = client_ip
+ self.client_mac = client_mac
+ self.session_id = session_id
+ self.decap_vrf_id = decap_vrf_id
+ self.vpp_sw_if_index = -1
+
+ def add_vpp_config(self):
+ r = self.test.vapi.pppoe_add_del_session(
+ self.client_ip, self.client_mac,
+ session_id=self.session_id,
+ decap_vrf_id=self.decap_vrf_id)
+ self.set_sw_if_index(r.sw_if_index)
+ self.vpp_sw_if_index = r.sw_if_index
+ self.generate_remote_hosts()
+
+ def remove_vpp_config(self):
+ self.unconfig()
+ self.test.vapi.pppoe_add_del_session(
+ self.client_ip, self.client_mac,
+ session_id=self.session_id,
+ decap_vrf_id=self.decap_vrf_id,
+ is_add=0)
+
+ def set_unnumbered(self, swif_iface):
+ self.test.vapi.sw_interface_set_unnumbered(
+ swif_iface,
+ self.vpp_sw_if_index)
diff --git a/test/vpp_srv6.py b/test/vpp_srv6.py
new file mode 100644
index 00000000000..d6efedc9f3e
--- /dev/null
+++ b/test/vpp_srv6.py
@@ -0,0 +1,198 @@
+"""
+ SRv6 LocalSIDs
+
+ object abstractions for representing SRv6 localSIDs in VPP
+"""
+
+from vpp_object import VppObject
+from socket import inet_pton, inet_ntop, AF_INET, AF_INET6
+
+
+class SRv6LocalSIDBehaviors():
+ # from src/vnet/srv6/sr.h
+ SR_BEHAVIOR_END = 1
+ SR_BEHAVIOR_X = 2
+ SR_BEHAVIOR_T = 3
+ SR_BEHAVIOR_D_FIRST = 4 # Unused. Separator in between regular and D
+ SR_BEHAVIOR_DX2 = 5
+ SR_BEHAVIOR_DX6 = 6
+ SR_BEHAVIOR_DX4 = 7
+ SR_BEHAVIOR_DT6 = 8
+ SR_BEHAVIOR_DT4 = 9
+ SR_BEHAVIOR_END_UN_PERF = 10
+ SR_BEHAVIOR_END_UN = 11
+ SR_BEHAVIOR_LAST = 12 # Must always be the last one
+
+
+class SRv6PolicyType():
+ # from src/vnet/srv6/sr.h
+ SR_POLICY_TYPE_DEFAULT = 0
+ SR_POLICY_TYPE_SPRAY = 1
+
+
+class SRv6PolicySteeringTypes():
+ # from src/vnet/srv6/sr.h
+ SR_STEER_L2 = 2
+ SR_STEER_IPV4 = 4
+ SR_STEER_IPV6 = 6
+
+
+class VppSRv6LocalSID(VppObject):
+ """
+ SRv6 LocalSID
+ """
+
+ def __init__(self, test, localsid, behavior, nh_addr,
+ end_psp, sw_if_index, vlan_index, fib_table):
+ self._test = test
+ self.localsid = localsid
+ self.behavior = behavior
+ self.nh_addr = nh_addr
+ self.end_psp = end_psp
+ self.sw_if_index = sw_if_index
+ self.vlan_index = vlan_index
+ self.fib_table = fib_table
+ self._configured = False
+
+ def add_vpp_config(self):
+ self._test.vapi.sr_localsid_add_del(
+ localsid=self.localsid,
+ behavior=self.behavior,
+ nh_addr=self.nh_addr,
+ is_del=0,
+ end_psp=self.end_psp,
+ sw_if_index=self.sw_if_index,
+ vlan_index=self.vlan_index,
+ fib_table=self.fib_table)
+ self._configured = True
+
+ def remove_vpp_config(self):
+ self._test.vapi.sr_localsid_add_del(
+ localsid=self.localsid,
+ behavior=self.behavior,
+ nh_addr=self.nh_addr,
+ is_del=1,
+ end_psp=self.end_psp,
+ sw_if_index=self.sw_if_index,
+ vlan_index=self.vlan_index,
+ fib_table=self.fib_table)
+ self._configured = False
+
+ def query_vpp_config(self):
+ # sr_localsids_dump API is disabled
+ # use _configured flag for now
+ return self._configured
+
+ def object_id(self):
+ return ("%d;%s,%d"
+ % (self.fib_table,
+ self.localsid,
+ self.behavior))
+
+
+class VppSRv6Policy(VppObject):
+ """
+ SRv6 Policy
+ """
+
+ def __init__(self, test, bsid,
+ is_encap, sr_type, weight, fib_table,
+ segments, source):
+ self._test = test
+ self.bsid = bsid
+ self.is_encap = is_encap
+ self.sr_type = sr_type
+ self.weight = weight
+ self.fib_table = fib_table
+ self.segments = segments
+ self.n_segments = len(segments)
+ # source not passed to API
+ # self.source = inet_pton(AF_INET6, source)
+ self.source = source
+ self._configured = False
+
+ def add_vpp_config(self):
+ self._test.vapi.sr_policy_add(
+ bsid=self.bsid,
+ weight=self.weight,
+ is_encap=self.is_encap,
+ is_spray=self.sr_type,
+ fib_table=self.fib_table,
+ sids={'num_sids': self.n_segments, 'sids': self.segments})
+ self._configured = True
+
+ def remove_vpp_config(self):
+ self._test.vapi.sr_policy_del(
+ self.bsid)
+ self._configured = False
+
+ def query_vpp_config(self):
+ # no API to query SR Policies
+ # use _configured flag for now
+ return self._configured
+
+ def object_id(self):
+ return ("%d;%s-><%s>;%d"
+ % (self.sr_type,
+ self.bsid,
+ ','.join(self.segments),
+ self.is_encap))
+
+
+class VppSRv6Steering(VppObject):
+ """
+ SRv6 Steering
+ """
+
+ def __init__(self, test,
+ bsid,
+ prefix,
+ mask_width,
+ traffic_type,
+ sr_policy_index,
+ table_id,
+ sw_if_index):
+ self._test = test
+ self.bsid = bsid
+ self.prefix = prefix
+ self.mask_width = mask_width
+ self.traffic_type = traffic_type
+ self.sr_policy_index = sr_policy_index
+ self.sw_if_index = sw_if_index
+ self.table_id = table_id
+ self._configured = False
+
+ def add_vpp_config(self):
+ self._test.vapi.sr_steering_add_del(
+ is_del=0,
+ bsid=self.bsid,
+ sr_policy_index=self.sr_policy_index,
+ table_id=self.table_id,
+ prefix={'address': self.prefix, 'len': self.mask_width},
+ sw_if_index=self.sw_if_index,
+ traffic_type=self.traffic_type)
+ self._configured = True
+
+ def remove_vpp_config(self):
+ self._test.vapi.sr_steering_add_del(
+ is_del=1,
+ bsid=self.bsid,
+ sr_policy_index=self.sr_policy_index,
+ table_id=self.table_id,
+ prefix={'address': self.prefix, 'len': self.mask_width},
+ sw_if_index=self.sw_if_index,
+ traffic_type=self.traffic_type)
+ self._configured = False
+
+ def query_vpp_config(self):
+ # no API to query steering entries
+ # use _configured flag for now
+ return self._configured
+
+ def object_id(self):
+ return ("%d;%d;%s/%d->%s"
+ % (self.table_id,
+ self.traffic_type,
+ self.prefix,
+ self.mask_width,
+ self.bsid))
diff --git a/test/vpp_vxlan_gbp_tunnel.py b/test/vpp_vxlan_gbp_tunnel.py
new file mode 100644
index 00000000000..0898bd9f810
--- /dev/null
+++ b/test/vpp_vxlan_gbp_tunnel.py
@@ -0,0 +1,75 @@
+
+from vpp_interface import VppInterface
+from vpp_papi import VppEnum
+
+
+INDEX_INVALID = 0xffffffff
+
+
+def find_vxlan_gbp_tunnel(test, src, dst, vni):
+ ts = test.vapi.vxlan_gbp_tunnel_dump(INDEX_INVALID)
+ for t in ts:
+ if src == str(t.tunnel.src) and \
+ dst == str(t.tunnel.dst) and \
+ t.tunnel.vni == vni:
+ return t.tunnel.sw_if_index
+ return INDEX_INVALID
+
+
+class VppVxlanGbpTunnel(VppInterface):
+ """
+ VPP VXLAN GBP interface
+ """
+
+ def __init__(self, test, src, dst, vni, mcast_itf=None, mode=None,
+ is_ipv6=None, encap_table_id=None, instance=0xffffffff):
+ """ Create VXLAN-GBP Tunnel interface """
+ super(VppVxlanGbpTunnel, self).__init__(test)
+ self.src = src
+ self.dst = dst
+ self.vni = vni
+ self.mcast_itf = mcast_itf
+ self.ipv6 = is_ipv6
+ self.encap_table_id = encap_table_id
+ self.instance = instance
+ if not mode:
+ self.mode = (VppEnum.vl_api_vxlan_gbp_api_tunnel_mode_t.
+ VXLAN_GBP_API_TUNNEL_MODE_L2)
+ else:
+ self.mode = mode
+
+ def encode(self):
+ return {
+ 'src': self.src,
+ 'dst': self.dst,
+ 'mode': self.mode,
+ 'vni': self.vni,
+ 'mcast_sw_if_index': self.mcast_itf.sw_if_index
+ if self.mcast_itf else INDEX_INVALID,
+ 'encap_table_id': self.encap_table_id,
+ 'instance': self.instance,
+ }
+
+ def add_vpp_config(self):
+ reply = self.test.vapi.vxlan_gbp_tunnel_add_del(
+ is_add=1,
+ tunnel=self.encode(),
+ )
+ self.set_sw_if_index(reply.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self.test.vapi.vxlan_gbp_tunnel_add_del(
+ is_add=0,
+ tunnel=self.encode(),
+ )
+
+ def query_vpp_config(self):
+ return (INDEX_INVALID != find_vxlan_gbp_tunnel(self._test,
+ self.src,
+ self.dst,
+ self.vni))
+
+ def object_id(self):
+ return "vxlan-gbp-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
+ self.src, self.dst)
diff --git a/test/vpp_vxlan_tunnel.py b/test/vpp_vxlan_tunnel.py
new file mode 100644
index 00000000000..d7e087da6f8
--- /dev/null
+++ b/test/vpp_vxlan_tunnel.py
@@ -0,0 +1,87 @@
+from vpp_interface import VppInterface
+from vpp_papi import VppEnum
+
+
+INDEX_INVALID = 0xffffffff
+DEFAULT_PORT = 4789
+UNDEFINED_PORT = 0
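+# a src/dst port of UNDEFINED_PORT is matched as the default VXLAN port 4789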
+
+
+def find_vxlan_tunnel(test, src, dst, s_port, d_port, vni):
+ ts = test.vapi.vxlan_tunnel_v2_dump(INDEX_INVALID)
+
+ src_port = DEFAULT_PORT
+ if s_port != UNDEFINED_PORT:
+ src_port = s_port
+
+ dst_port = DEFAULT_PORT
+ if d_port != UNDEFINED_PORT:
+ dst_port = d_port
+
+ for t in ts:
+ if src == str(t.src_address) and \
+ dst == str(t.dst_address) and \
+ src_port == t.src_port and \
+ dst_port == t.dst_port and \
+ t.vni == vni:
+ return t.sw_if_index
+ return INDEX_INVALID
+
+
+class VppVxlanTunnel(VppInterface):
+ """
+ VPP VXLAN interface
+ """
+
+ def __init__(self, test, src, dst, vni,
+ src_port=UNDEFINED_PORT, dst_port=UNDEFINED_PORT,
+ mcast_itf=None,
+ mcast_sw_if_index=INDEX_INVALID,
+ decap_next_index=INDEX_INVALID,
+ encap_vrf_id=None, instance=0xffffffff, is_l3=False):
+ """ Create VXLAN Tunnel interface """
+ super(VppVxlanTunnel, self).__init__(test)
+ self.src = src
+ self.dst = dst
+ self.vni = vni
+ self.src_port = src_port
+ self.dst_port = dst_port
+ self.mcast_itf = mcast_itf
+ self.mcast_sw_if_index = mcast_sw_if_index
+ self.encap_vrf_id = encap_vrf_id
+ self.decap_next_index = decap_next_index
+ self.instance = instance
+ self.is_l3 = is_l3
+
+ if (self.mcast_itf):
+ self.mcast_sw_if_index = self.mcast_itf.sw_if_index
+
+ def add_vpp_config(self):
+ reply = self.test.vapi.vxlan_add_del_tunnel_v3(
+ is_add=1, src_address=self.src, dst_address=self.dst, vni=self.vni,
+ src_port=self.src_port, dst_port=self.dst_port,
+ mcast_sw_if_index=self.mcast_sw_if_index,
+ encap_vrf_id=self.encap_vrf_id, is_l3=self.is_l3,
+ instance=self.instance, decap_next_index=self.decap_next_index)
+ self.set_sw_if_index(reply.sw_if_index)
+ self._test.registry.register(self, self._test.logger)
+
+ def remove_vpp_config(self):
+ self.test.vapi.vxlan_add_del_tunnel_v2(
+ is_add=0, src_address=self.src, dst_address=self.dst, vni=self.vni,
+ src_port=self.src_port, dst_port=self.dst_port,
+ mcast_sw_if_index=self.mcast_sw_if_index,
+ encap_vrf_id=self.encap_vrf_id, instance=self.instance,
+ decap_next_index=self.decap_next_index)
+
+ def query_vpp_config(self):
+ return (INDEX_INVALID != find_vxlan_tunnel(self._test,
+ self.src,
+ self.dst,
+ self.src_port,
+ self.dst_port,
+ self.vni))
+
+ def object_id(self):
+ return "vxlan-%d-%d-%s-%s" % (self.sw_if_index, self.vni,
+ self.src, self.dst)