path: root/vpp/test
author    sreejith <sreejith.surendrannair@linaro.org>    2017-03-29 01:15:02 -0400
committer sreejith <sreejith.surendrannair@linaro.org>    2017-03-29 02:23:59 -0400
commit    a23197980e40d4d9414bcfaf59005a1dc2a89251 (patch)
tree      da3fc5410a9cda99e05d2e0f6dae06072a0c50b8 /vpp/test
parent    746b57564deede624261ab8a96c94f562f24d22c (diff)
Added vpp initial source code from master branch 17.01.1
Change-Id: I81bdace6f330825a1746a853766779dfb24765fd
Signed-off-by: sreejith <sreejith.surendrannair@linaro.org>
Diffstat (limited to 'vpp/test')
-rw-r--r--  vpp/test/Makefile                              109
-rw-r--r--  vpp/test/bfd.py                                217
-rw-r--r--  vpp/test/doc/Makefile                          243
-rw-r--r--  vpp/test/doc/conf.py                           341
-rw-r--r--  vpp/test/doc/index.rst                         165
-rw-r--r--  vpp/test/framework.py                          697
-rw-r--r--  vpp/test/hook.py                               227
-rw-r--r--  vpp/test/log.py                                 72
-rw-r--r--  vpp/test/patches/scapy-2.3.3/gre-layers.patch   25
-rw-r--r--  vpp/test/patches/scapy-2.3.3/mpls.py.patch      13
-rw-r--r--  vpp/test/run_tests.py                           12
-rw-r--r--  vpp/test/scapy_handlers/__init__.py              0
-rw-r--r--  vpp/test/template_bd.py                        174
-rw-r--r--  vpp/test/test_bfd.py                           275
-rw-r--r--  vpp/test/test_classifier.py                    343
-rw-r--r--  vpp/test/test_fib.py                            30
-rw-r--r--  vpp/test/test_gre.py                           672
-rw-r--r--  vpp/test/test_ip4.py                           469
-rw-r--r--  vpp/test/test_ip4_irb.py                       266
-rw-r--r--  vpp/test/test_ip6.py                           212
-rw-r--r--  vpp/test/test_l2_fib.py                        382
-rw-r--r--  vpp/test/test_l2bd.py                          286
-rw-r--r--  vpp/test/test_l2bd_multi_instance.py           498
-rw-r--r--  vpp/test/test_l2xc.py                          230
-rw-r--r--  vpp/test/test_l2xc_multi_instance.py           345
-rw-r--r--  vpp/test/test_lb.py                            217
-rw-r--r--  vpp/test/test_mpls.py                          743
-rw-r--r--  vpp/test/test_snat.py                          626
-rw-r--r--  vpp/test/test_span.py                          193
-rw-r--r--  vpp/test/test_vxlan.py                         196
-rw-r--r--  vpp/test/util.py                                87
-rw-r--r--  vpp/test/vpp_gre_interface.py                   36
-rw-r--r--  vpp/test/vpp_interface.py                      264
-rw-r--r--  vpp/test/vpp_ip_route.py                       116
-rw-r--r--  vpp/test/vpp_lo_interface.py                    13
-rw-r--r--  vpp/test/vpp_object.py                          79
-rw-r--r--  vpp/test/vpp_papi_provider.py                  986
-rw-r--r--  vpp/test/vpp_pg_interface.py                   326
-rw-r--r--  vpp/test/vpp_sub_interface.py                  140
39 files changed, 10325 insertions, 0 deletions
diff --git a/vpp/test/Makefile b/vpp/test/Makefile
new file mode 100644
index 00000000..e2d634c4
--- /dev/null
+++ b/vpp/test/Makefile
@@ -0,0 +1,109 @@
+.PHONY: verify-python-path
+
+verify-python-path:
+ifndef VPP_PYTHON_PREFIX
+ $(error VPP_PYTHON_PREFIX is not set)
+endif
+
+PYTHON_VENV_PATH=$(VPP_PYTHON_PREFIX)/virtualenv
+PYTHON_DEPENDS=scapy==2.3.3 pexpect
+SCAPY_SOURCE=$(PYTHON_VENV_PATH)/lib/python2.7/site-packages/
+BUILD_COV_DIR = $(BR)/test-cov
+
+PIP_INSTALL_DONE=$(VPP_PYTHON_PREFIX)/pip-install.done
+PIP_PATCH_DONE=$(VPP_PYTHON_PREFIX)/pip-patch.done
+PAPI_INSTALL_DONE=$(VPP_PYTHON_PREFIX)/papi-install.done
+
+PAPI_INSTALL_FLAGS=$(PIP_INSTALL_DONE) $(PIP_PATCH_DONE) $(PAPI_INSTALL_DONE)
+
+$(PIP_INSTALL_DONE):
+ @virtualenv $(PYTHON_VENV_PATH)
+ @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && pip install $(PYTHON_DEPENDS)"
+ @touch $@
+
+$(PIP_PATCH_DONE): $(PIP_INSTALL_DONE)
+ @echo --- patching ---
+ @sleep 1 # Ensure python recompiles patched *.py files -> *.pyc
+ for f in $(CURDIR)/patches/scapy-2.3.3/*.patch ; do \
+ echo Applying patch: $$(basename $$f) ; \
+ patch -p1 -d $(SCAPY_SOURCE) < $$f ; \
+ done
+ @touch $@
+
+$(PAPI_INSTALL_DONE): $(PIP_PATCH_DONE)
+ @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && cd $(WS_ROOT)/vpp-api/python && python setup.py install"
+ @touch $@
+
+define retest-func
+ @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && python run_tests.py discover -p test_$(TEST)\"*.py\""
+endef
+
+test: reset verify-python-path $(PAPI_INSTALL_DONE)
+ $(call retest-func)
+
+retest: reset verify-python-path
+ $(call retest-func)
+
+.PHONY: wipe doc
+
+reset:
+ @rm -f /dev/shm/vpp-unittest-*
+ @rm -rf /tmp/vpp-unittest-*
+
+wipe: reset
+ @rm -rf $(PYTHON_VENV_PATH)
+ @rm -f $(PAPI_INSTALL_FLAGS)
+
+doc: verify-python-path
+ @virtualenv $(PYTHON_VENV_PATH)
+ @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && pip install $(PYTHON_DEPENDS) sphinx"
+ @bash -c "source $(PYTHON_VENV_PATH)/bin/activate && make -C doc WS_ROOT=$(WS_ROOT) BR=$(BR) NO_VPP_PAPI=1 html"
+
+.PHONY: wipe-doc
+
+wipe-doc:
+ @make -C doc wipe BR=$(BR)
+
+cov: wipe-cov reset verify-python-path $(PAPI_INSTALL_DONE)
+ @lcov --zerocounters --directory $(VPP_TEST_BUILD_DIR)
+ $(call retest-func)
+ @mkdir $(BUILD_COV_DIR)
+ @lcov --capture --directory $(VPP_TEST_BUILD_DIR) --output-file $(BUILD_COV_DIR)/coverage.info
+ @genhtml $(BUILD_COV_DIR)/coverage.info --output-directory $(BUILD_COV_DIR)/html
+ @echo
+ @echo "Build finished. Code coverage report is in $(BUILD_COV_DIR)/html/index.html"
+
+.PHONY: wipe-cov
+
+wipe-cov: wipe
+ @rm -rf $(BUILD_COV_DIR)
+
+help:
+ @echo "Running tests:"
+ @echo ""
+ @echo " test - build and run functional tests"
+ @echo " test-debug - build and run functional tests (debug build)"
+ @echo " retest - run functional tests"
+ @echo " retest-debug - run functional tests (debug build)"
+ @echo " test-wipe - wipe (temporary) files generated by unit tests"
+ @echo ""
+ @echo "Arguments controlling test runs:"
+ @echo " V=[0|1|2] - set test verbosity level"
+ @echo " DEBUG=<type> - set VPP debugging kind"
+ @echo " DEBUG=core - detect coredump and load it in gdb on crash"
+ @echo " DEBUG=gdb - allow easy debugging by printing VPP PID "
+ @echo " and waiting for user input before running "
+ @echo " and tearing down a testcase"
+ @echo " DEBUG=gdbserver - run gdb inside a gdb server, otherwise "
+ @echo " same as above"
+ @echo " STEP=[yes|no] - ease debugging by stepping through a testcase "
+ @echo " TEST=<name> - only run specific test"
+ @echo ""
+ @echo "Creating test documentation"
+ @echo " test-doc - generate documentation for test framework"
+ @echo " test-wipe-doc - wipe documentation for test framework"
+ @echo ""
+ @echo "Creating test code coverage report"
+ @echo " test-cov - generate code coverage report for test framework"
+ @echo " test-wipe-cov - wipe code coverage report for test framework"
+ @echo ""
diff --git a/vpp/test/bfd.py b/vpp/test/bfd.py
new file mode 100644
index 00000000..fe63264e
--- /dev/null
+++ b/vpp/test/bfd.py
@@ -0,0 +1,217 @@
+from socket import AF_INET, AF_INET6
+from scapy.all import *
+from scapy.packet import *
+from scapy.fields import *
+from framework import *
+from vpp_object import *
+from util import NumericConstant
+
+
+class BFDDiagCode(NumericConstant):
+ """ BFD Diagnostic Code """
+ no_diagnostic = 0
+ control_detection_time_expired = 1
+ echo_function_failed = 2
+ neighbor_signaled_session_down = 3
+ forwarding_plane_reset = 4
+ path_down = 5
+ concatenated_path_down = 6
+ administratively_down = 7
+ reverse_concatenated_path_down = 8
+
+ desc_dict = {
+ no_diagnostic: "No diagnostic",
+ control_detection_time_expired: "Control Detection Time Expired",
+ echo_function_failed: "Echo Function Failed",
+ neighbor_signaled_session_down: "Neighbor Signaled Session Down",
+ forwarding_plane_reset: "Forwarding Plane Reset",
+ path_down: "Path Down",
+ concatenated_path_down: "Concatenated Path Down",
+ administratively_down: "Administratively Down",
+ reverse_concatenated_path_down: "Reverse Concatenated Path Down",
+ }
+
+ def __init__(self, value):
+ NumericConstant.__init__(self, value)
+
+
+class BFDState(NumericConstant):
+ """ BFD State """
+ admin_down = 0
+ down = 1
+ init = 2
+ up = 3
+
+ desc_dict = {
+ admin_down: "AdminDown",
+ down: "Down",
+ init: "Init",
+ up: "Up",
+ }
+
+ def __init__(self, value):
+ NumericConstant.__init__(self, value)
+
+
+class BFD(Packet):
+
+ udp_dport = 3784 #: BFD destination port per RFC 5881
+ udp_sport_min = 49152 #: BFD source port min value per RFC 5881
+ udp_sport_max = 65535 #: BFD source port max value per RFC 5881
+
+ name = "BFD"
+
+ fields_desc = [
+ BitField("version", 1, 3),
+ BitEnumField("diag", 0, 5, BFDDiagCode.desc_dict),
+ BitEnumField("state", 0, 2, BFDState.desc_dict),
+ FlagsField("flags", 0, 6, ['P', 'F', 'C', 'A', 'D', 'M']),
+ XByteField("detect_mult", 0),
+ XByteField("length", 24),
+ BitField("my_discriminator", 0, 32),
+ BitField("your_discriminator", 0, 32),
+ BitField("desired_min_tx_interval", 0, 32),
+ BitField("required_min_rx_interval", 0, 32),
+ BitField("required_min_echo_rx_interval", 0, 32)]
+
+ def mysummary(self):
+ return self.sprintf("BFD(my_disc=%BFD.my_discriminator%,"
+ "your_disc=%BFD.your_discriminator%)")
+
+# glue the BFD packet class to scapy parser
+bind_layers(UDP, BFD, dport=BFD.udp_dport)
+
+
+class VppBFDUDPSession(VppObject):
+ """ Represents BFD UDP session in VPP """
+
+ @property
+ def test(self):
+ """ Test which created this session """
+ return self._test
+
+ @property
+ def interface(self):
+ """ Interface on which this session lives """
+ return self._interface
+
+ @property
+ def af(self):
+ """ Address family - AF_INET or AF_INET6 """
+ return self._af
+
+ @property
+ def bs_index(self):
+ """ BFD session index from VPP """
+ if self._bs_index is not None:
+ return self._bs_index
+ raise NotConfiguredException("not configured")
+
+ @property
+ def local_addr(self):
+ """ BFD session local address (VPP address) """
+ if self._local_addr is None:
+ return self._interface.local_ip4
+ return self._local_addr
+
+ @property
+ def local_addr_n(self):
+ """ BFD session local address (VPP address) - raw, suitable for API """
+ if self._local_addr is None:
+ return self._interface.local_ip4n
+ return self._local_addr_n
+
+ @property
+ def peer_addr(self):
+ """ BFD session peer address """
+ return self._peer_addr
+
+ @property
+ def peer_addr_n(self):
+ """ BFD session peer address - raw, suitable for API """
+ return self._peer_addr_n
+
+ @property
+ def state(self):
+ """ BFD session state """
+ result = self.test.vapi.bfd_udp_session_dump()
+ session = None
+ for s in result:
+ if s.sw_if_index == self.interface.sw_if_index:
+ if self.af == AF_INET \
+ and s.is_ipv6 == 0 \
+ and self.interface.local_ip4n == s.local_addr[:4] \
+ and self.interface.remote_ip4n == s.peer_addr[:4]:
+ session = s
+ break
+ if session is None:
+ raise Exception(
+ "Could not find BFD session in VPP response: %s" % repr(result))
+ return session.state
+
+ @property
+ def desired_min_tx(self):
+ return self._desired_min_tx
+
+ @property
+ def required_min_rx(self):
+ return self._required_min_rx
+
+ @property
+ def detect_mult(self):
+ return self._detect_mult
+
+ def __init__(self, test, interface, peer_addr, local_addr=None, af=AF_INET,
+ desired_min_tx=100000, required_min_rx=100000, detect_mult=3):
+ self._test = test
+ self._interface = interface
+ self._af = af
+ self._local_addr = local_addr
+ self._peer_addr = peer_addr
+ self._peer_addr_n = socket.inet_pton(af, peer_addr)
+ self._bs_index = None
+ self._desired_min_tx = desired_min_tx
+ self._required_min_rx = required_min_rx
+ self._detect_mult = detect_mult
+
+ def add_vpp_config(self):
+ is_ipv6 = 1 if AF_INET6 == self.af else 0
+ result = self.test.vapi.bfd_udp_add(
+ self._interface.sw_if_index,
+ self.desired_min_tx,
+ self.required_min_rx,
+ self.detect_mult,
+ self.local_addr_n,
+ self.peer_addr_n,
+ is_ipv6=is_ipv6)
+ self._bs_index = result.bs_index
+
+ def query_vpp_config(self):
+ result = self.test.vapi.bfd_udp_session_dump()
+ session = None
+ for s in result:
+ if s.sw_if_index == self.interface.sw_if_index:
+ if self.af == AF_INET \
+ and s.is_ipv6 == 0 \
+ and self.interface.local_ip4n == s.local_addr[:4] \
+ and self.interface.remote_ip4n == s.peer_addr[:4]:
+ session = s
+ break
+ if session is None:
+ return False
+ return True
+
+ def remove_vpp_config(self):
+ if hasattr(self, '_bs_index'):
+ is_ipv6 = 1 if AF_INET6 == self._af else 0
+ self.test.vapi.bfd_udp_del(
+ self._interface.sw_if_index,
+ self.local_addr_n,
+ self.peer_addr_n,
+ is_ipv6=is_ipv6)
+
+ def object_id(self):
+ return "bfd-udp-%d" % self.bs_index
+
+ def admin_up(self):
+ self.test.vapi.bfd_session_set_flags(self.bs_index, 1)
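For orientation (an illustrative sketch, not part of the commit): a test deriving from VppTestCase could drive the session class above roughly as follows, assuming `self.pg0` is an already-configured packet-generator interface:

    # hypothetical excerpt from a test method of a VppTestCase subclass
    from bfd import VppBFDUDPSession   # module-level import in a real test

    session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
    session.add_vpp_config()                     # bfd_udp_add, remembers bs_index
    self.assertTrue(session.query_vpp_config())  # session appears in the dump
    session.admin_up()                           # bfd_session_set_flags(bs_index, 1)
    state = session.state                        # looked up via bfd_udp_session_dump
    session.remove_vpp_config()                  # bfd_udp_del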
diff --git a/vpp/test/doc/Makefile b/vpp/test/doc/Makefile
new file mode 100644
index 00000000..809abef8
--- /dev/null
+++ b/vpp/test/doc/Makefile
@@ -0,0 +1,243 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SRC_DOC_DIR = $(WS_ROOT)/test/doc
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILD_DOC_ROOT = $(BR)/test-doc
+BUILD_DOC_DIR = $(BUILD_DOC_ROOT)/build
+API_DOC_GEN_DIR = $(BUILD_DOC_ROOT)/apidoc
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILD_DOC_DIR)/.sphinx-cache $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(API_DOC_GEN_DIR) -c $(SRC_DOC_DIR)
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+INDEX_REL_PATH:=$(shell realpath --relative-to=$(API_DOC_GEN_DIR) $(SRC_DOC_DIR)/index.rst)
+IN_VENV:=$(shell if pip -V | grep "virtualenv" 2>&1 > /dev/null; then echo 1; else echo 0; fi)
+
+.PHONY: verify-virtualenv
+verify-virtualenv:
+ifeq ($(IN_VENV),0)
+ $(error "Not running inside virtualenv (are you running 'make test-doc' from root?)")
+endif
+
+.PHONY: regen-api-doc
+regen-api-doc: verify-virtualenv
+ @mkdir -p $(API_DOC_GEN_DIR)
+ #@echo ".. include:: $(INDEX_REL_PATH)" > $(API_DOC_GEN_DIR)/index.rst
+ @cp $(SRC_DOC_DIR)/index.rst $(API_DOC_GEN_DIR)
+ sphinx-apidoc -o $(API_DOC_GEN_DIR) ..
+
+.PHONY: help
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " epub3 to make an epub3"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+ @echo " dummy to check syntax errors of document sources"
+
+.PHONY: wipe
+wipe:
+ rm -rf $(BUILD_DOC_ROOT)
+
+.PHONY: html
+html: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILD_DOC_DIR)/html."
+
+.PHONY: dirhtml
+dirhtml: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILD_DOC_DIR)/dirhtml."
+
+.PHONY: singlehtml
+singlehtml: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILD_DOC_DIR)/singlehtml."
+
+.PHONY: pickle
+pickle: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+.PHONY: json
+json: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp
+htmlhelp: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILD_DOC_DIR)/htmlhelp."
+
+.PHONY: qthelp
+qthelp: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILD_DOC_DIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILD_DOC_DIR)/qthelp/VPPtestframework.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILD_DOC_DIR)/qthelp/VPPtestframework.qhc"
+
+.PHONY: applehelp
+applehelp: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILD_DOC_DIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+.PHONY: devhelp
+devhelp: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/VPPtestframework"
+ @echo "# ln -s $(BUILD_DOC_DIR)/devhelp $$HOME/.local/share/devhelp/VPPtestframework"
+ @echo "# devhelp"
+
+.PHONY: epub
+epub: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILD_DOC_DIR)/epub."
+
+.PHONY: epub3
+epub3:
+ $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/epub3
+ @echo
+ @echo "Build finished. The epub3 file is in $(BUILD_DOC_DIR)/epub3."
+
+.PHONY: latex
+latex: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILD_DOC_DIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+.PHONY: latexpdf
+latexpdf: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILD_DOC_DIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILD_DOC_DIR)/latex."
+
+.PHONY: latexpdfja
+latexpdfja: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILD_DOC_DIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILD_DOC_DIR)/latex."
+
+.PHONY: text
+text: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILD_DOC_DIR)/text."
+
+.PHONY: man
+man: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILD_DOC_DIR)/man."
+
+.PHONY: texinfo
+texinfo: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILD_DOC_DIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+.PHONY: info
+info: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILD_DOC_DIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILD_DOC_DIR)/texinfo."
+
+.PHONY: gettext
+gettext: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILD_DOC_DIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILD_DOC_DIR)/locale."
+
+.PHONY: changes
+changes: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILD_DOC_DIR)/changes."
+
+.PHONY: linkcheck
+linkcheck: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILD_DOC_DIR)/linkcheck/output.txt."
+
+.PHONY: doctest
+doctest: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILD_DOC_DIR)/doctest/output.txt."
+
+.PHONY: coverage
+coverage: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILD_DOC_DIR)/coverage/python.txt."
+
+.PHONY: xml
+xml: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILD_DOC_DIR)/xml."
+
+.PHONY: pseudoxml
+pseudoxml: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILD_DOC_DIR)/pseudoxml."
+
+.PHONY: dummy
+dummy: regen-api-doc verify-virtualenv
+ $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILD_DOC_DIR)/dummy
+ @echo
+ @echo "Build finished. Dummy builder generates no files."
diff --git a/vpp/test/doc/conf.py b/vpp/test/doc/conf.py
new file mode 100644
index 00000000..96ea50fe
--- /dev/null
+++ b/vpp/test/doc/conf.py
@@ -0,0 +1,341 @@
+# -*- coding: utf-8 -*-
+#
+# VPP test framework documentation build configuration file, created by
+# sphinx-quickstart on Thu Oct 13 08:45:03 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('..'))
+
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'VPP test framework'
+copyright = u'2016, VPP team'
+author = u'VPP team'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = u'0.1'
+# The full version, including alpha/beta/rc tags.
+release = u'0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This patterns also effect to html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+default_role = 'any'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'VPP test framework v0.1'
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+# html_static_path = []
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'VPPtestframeworkdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'VPPtestframework.tex', u'VPP test framework Documentation',
+ u'VPP team', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, will not define \strong, \code, \titleref, \crossref ... but only
+# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
+# packages.
+#
+# latex_keep_old_macro_names = True
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'vpptestframework', u'VPP test framework Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'VPPtestframework', u'VPP test framework Documentation',
+ author, 'VPPtestframework', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False
diff --git a/vpp/test/doc/index.rst b/vpp/test/doc/index.rst
new file mode 100644
index 00000000..8cbe961f
--- /dev/null
+++ b/vpp/test/doc/index.rst
@@ -0,0 +1,165 @@
+.. _unittest: https://docs.python.org/2/library/unittest.html
+.. _TestCase: https://docs.python.org/2/library/unittest.html#unittest.TestCase
+.. _AssertionError: https://docs.python.org/2/library/exceptions.html#exceptions.AssertionError
+.. _SkipTest: https://docs.python.org/2/library/unittest.html#unittest.SkipTest
+.. _virtualenv: http://docs.python-guide.org/en/latest/dev/virtualenvs/
+.. _scapy: http://www.secdev.org/projects/scapy/
+
+.. |vtf| replace:: VPP Test Framework
+
+|vtf|
+=====
+
+Overview
+########
+
+The goal of the |vtf| is to ease writing, running and debugging
+unit tests for VPP. Python was chosen as a high-level language that allows
+rapid development, with scapy_ providing the necessary tools for creating
+and dissecting packets.
+
+Anatomy of a test case
+######################
+
+Python's unittest_ is used as the base framework upon which the VPP test
+framework is built. A test suite in the |vtf| consists of multiple classes
+derived from `VppTestCase`, which is itself derived from TestCase_.
+The test class defines one or more test functions, which act as test cases.
+
+Function flow when running a test case is:
+
+1. `setUpClass <VppTestCase.setUpClass>`:
+ This function is called once for each test class, allowing a one-time test
+   setup to be executed. If this function throws an exception,
+ none of the test functions are executed.
+2. `setUp <VppTestCase.setUp>`:
+ The setUp function runs before each of the test functions. If this function
+ throws an exception other than AssertionError_ or SkipTest_, then this is
+ considered an error, not a test failure.
+3. *test_<name>*:
+ This is the guts of the test case. It should execute the test scenario
+   and use the various assert functions from the unittest framework to verify
+   the expected outcomes.
+4. `tearDown <VppTestCase.tearDown>`:
+ The tearDown function is called after each test function with the purpose
+ of doing partial cleanup.
+5. `tearDownClass <VppTestCase.tearDownClass>`:
+ Method called once after running all of the test functions to perform
+ the final cleanup.
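A skeletal test case following this flow could look like the sketch below (illustrative only, not part of the commit; the test modules added here follow the same shape):

    from framework import VppTestCase


    class TestSketch(VppTestCase):
        """ Illustrative test case skeleton """

        @classmethod
        def setUpClass(cls):
            super(TestSketch, cls).setUpClass()
            # one-time setup, e.g. cls.create_pg_interfaces(range(2))

        def setUp(self):
            super(TestSketch, self).setUp()
            # per-test setup

        def test_something(self):
            """ one test scenario """
            # drive VPP through self.vapi and check results with assert methods
            self.assertTrue(True)

        def tearDown(self):
            super(TestSketch, self).tearDown()
            # per-test (partial) cleanup; tearDownClass is inherited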
+
+Test temporary directory and VPP life cycle
+###########################################
+
+Test separation is achieved by separating the test files and vpp instances.
+Each test creates a temporary directory and its name is used to create
+a shared memory prefix which is used to run a VPP instance.
+This way, there is no conflict between any other VPP instances running
+on the box and the test VPP. Any temporary files created by the test case
+are stored in this temporary test directory.
+
+Virtual environment
+###################
+
+Virtualenv_ is a python module which provides a means to create an environment
+containing the dependencies required by the |vtf|, allowing a separation
+from any existing system-wide packages. |vtf|'s Makefile automatically
+creates a virtualenv_ inside build-root and installs the required packages
+in that environment. The environment is entered whenever executing a test
+via one of the make test targets.
+
+Naming conventions
+##################
+
+Most unit tests do some kind of packet manipulation - sending and receiving
+packets between VPP and virtual hosts connected to the VPP. Referring
+to the sides, addresses, etc. is always done as if looking from the VPP side,
+thus:
+
+* *local_* prefix is used for the VPP side.
+ So e.g. `local_ip4 <VppInterface.local_ip4>` address is the IPv4 address
+ assigned to the VPP interface.
+* *remote_* prefix is used for the virtual host side.
+ So e.g. `remote_mac <VppInterface.remote_mac>` address is the MAC address
+ assigned to the virtual host connected to the VPP.
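For example (an illustrative fragment; `pg0` stands for a packet-generator interface created by the test):

    vpp_side_ip = self.pg0.local_ip4     # IPv4 address configured on the VPP interface
    host_side_mac = self.pg0.remote_mac  # MAC of the virtual host attached to pg0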
+
+Packet flow in the |vtf|
+########################
+
+Test framework -> VPP
+~~~~~~~~~~~~~~~~~~~~~
+
+|vtf| doesn't send any packets to VPP directly. Traffic is instead injected
+using packet-generator interfaces, represented by the `VppPGInterface` class.
+Packets are written into a temporary .pcap file, which is then read by the VPP
+and the packets are injected into the VPP world.
+
+VPP -> test framework
+~~~~~~~~~~~~~~~~~~~~~
+
+Similarly, VPP doesn't send any packets to |vtf| directly. Instead, packet
+capture feature is used to capture and write traffic to a temporary .pcap file,
+which is then read and analyzed by the |vtf|.
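Putting the two directions together, a typical exchange looks roughly like the sketch below; scapy layers are assumed to be imported, and `add_stream`/`get_capture` are VppPGInterface helpers from the framework that are not shown in this excerpt:

    # hypothetical excerpt from a test method; self.pg0/self.pg1 are assumed
    # to be configured packet-generator interfaces
    pkt = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
           IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
           UDP(sport=1234, dport=1234) /
           Raw('\x00' * 100))

    self.pg0.add_stream(pkt)                    # write the packet(s) to a .pcap for VPP
    self.pg_enable_capture(self.pg_interfaces)  # start capturing on all pg interfaces
    self.pg_start()                             # let VPP replay and process the stream
    capture = self.pg1.get_capture()            # packets VPP emitted on pg1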
+
+Test framework objects
+######################
+
+The following objects abstract VPP functionality and provide a means to
+perform common tasks easily in the test cases.
+
+* `VppInterface`: abstract class representing a generic VPP interface;
+  it contains common functionality, which is then used by derived classes
+* `VppPGInterface`: class representing VPP packet-generator interface.
+ The interface is created/destroyed when the object is created/destroyed.
+* `VppSubInterface`: VPP sub-interface abstract class, containing common
+ functionality for e.g. `VppDot1QSubint` and `VppDot1ADSubint` classes
+
+How VPP API/CLI is called
+#########################
+
+VPP provides Python bindings in a module called vpp-papi, which the test
+framework installs in the virtual environment. A shim layer represented by
+the `VppPapiProvider` class is built on top of vpp-papi, serving these
+purposes:
+
+1. Automatic return value checks:
+ After each API is called, the return value is checked against the expected
+ return value (by default 0, but can be overridden) and an exception
+ is raised if the check fails.
+2. Automatic call of hooks:
+
+ a. `before_cli <Hook.before_cli>` and `before_api <Hook.before_api>` hooks
+ are used for debug logging and stepping through the test
+ b. `after_cli <Hook.after_cli>` and `after_api <Hook.after_api>` hooks
+ are used for monitoring the vpp process for crashes
+3. Simplification of API calls:
+   Many of the VPP APIs take a lot of parameters; by providing sane defaults
+   for these, the API is much easier to use in the common case and the code is
+   more readable. E.g. the ip_add_del_route API takes ~25 parameters, of which
+   only 3 are typically needed in the common case (see the sketch below).
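In test code this shim is reached through the test case's `vapi` attribute. An illustrative use, borrowing the BFD API added earlier in this commit:

    self.vapi.cli("clear trace")                   # plain CLI call
    self.logger.info(self.vapi.ppcli("show int"))  # CLI call with pretty-printed reply
    reply = self.vapi.bfd_udp_add(                 # API call through the shim; a non-zero
        self.pg0.sw_if_index,                      # return value raises automatically
        100000, 100000, 3,                         # tx/rx intervals and detect multiplier
        self.pg0.local_ip4n, self.pg0.remote_ip4n)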
+
+Example: how to add a new test
+##############################
+
+In this example, we will describe how to add a new test case which tests VPP...
+
+1. Add a new file called...
+2. Add a setUpClass function containing...
+3. Add the test code in test...
+4. Run the test...
+
+|vtf| module documentation
+##########################
+
+.. toctree::
+ :maxdepth: 2
+ :glob:
+
+ modules.rst
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/vpp/test/framework.py b/vpp/test/framework.py
new file mode 100644
index 00000000..1c3e56cc
--- /dev/null
+++ b/vpp/test/framework.py
@@ -0,0 +1,697 @@
+#!/usr/bin/env python
+
+import subprocess
+import unittest
+import tempfile
+import time
+import resource
+from collections import deque
+from threading import Thread
+from inspect import getdoc
+from hook import StepHook, PollHook
+from vpp_pg_interface import VppPGInterface
+from vpp_lo_interface import VppLoInterface
+from vpp_papi_provider import VppPapiProvider
+from scapy.packet import Raw
+from log import *
+
+"""
+ Test framework module.
+
+ The module provides a set of tools for constructing and running tests and
+ representing the results.
+"""
+
+
+class _PacketInfo(object):
+ """Private class to create packet info object.
+
+ Help process information about the next packet.
+ Set variables to default values.
+ """
+ #: Store the index of the packet.
+ index = -1
+ #: Store the index of the source packet generator interface of the packet.
+ src = -1
+ #: Store the index of the destination packet generator interface
+ #: of the packet.
+ dst = -1
+ #: Store the copy of the former packet.
+ data = None
+
+ def __eq__(self, other):
+ index = self.index == other.index
+ src = self.src == other.src
+ dst = self.dst == other.dst
+ data = self.data == other.data
+ return index and src and dst and data
+
+
+def pump_output(out, deque):
+ for line in iter(out.readline, b''):
+ deque.append(line)
+
+
+class VppTestCase(unittest.TestCase):
+ """This subclass is a base class for VPP test cases that are implemented as
+    classes. It provides methods to create and run test cases.
+ """
+
+ @property
+ def packet_infos(self):
+ """List of packet infos"""
+ return self._packet_infos
+
+ @packet_infos.setter
+ def packet_infos(self, value):
+ self._packet_infos = value
+
+ @classmethod
+ def instance(cls):
+ """Return the instance of this testcase"""
+ return cls.test_instance
+
+ @classmethod
+ def set_debug_flags(cls, d):
+ cls.debug_core = False
+ cls.debug_gdb = False
+ cls.debug_gdbserver = False
+ if d is None:
+ return
+ dl = d.lower()
+ if dl == "core":
+ if resource.getrlimit(resource.RLIMIT_CORE)[0] <= 0:
+ # give a heads up if this is actually useless
+ cls.logger.critical("WARNING: core size limit is set 0, core "
+ "files will NOT be created")
+ cls.debug_core = True
+ elif dl == "gdb":
+ cls.debug_gdb = True
+ elif dl == "gdbserver":
+ cls.debug_gdbserver = True
+ else:
+ raise Exception("Unrecognized DEBUG option: '%s'" % d)
+
+ @classmethod
+ def setUpConstants(cls):
+ """ Set-up the test case class based on environment variables """
+ try:
+ s = os.getenv("STEP")
+ cls.step = True if s.lower() in ("y", "yes", "1") else False
+ except:
+ cls.step = False
+ try:
+ d = os.getenv("DEBUG")
+ except:
+ d = None
+ cls.set_debug_flags(d)
+ cls.vpp_bin = os.getenv('VPP_TEST_BIN', "vpp")
+ cls.plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
+ debug_cli = ""
+ if cls.step or cls.debug_gdb or cls.debug_gdbserver:
+ debug_cli = "cli-listen localhost:5002"
+ cls.vpp_cmdline = [cls.vpp_bin, "unix", "{", "nodaemon", debug_cli, "}",
+ "api-segment", "{", "prefix", cls.shm_prefix, "}"]
+ if cls.plugin_path is not None:
+ cls.vpp_cmdline.extend(["plugin_path", cls.plugin_path])
+ cls.logger.info("vpp_cmdline: %s" % cls.vpp_cmdline)
+
+ @classmethod
+ def wait_for_enter(cls):
+ if cls.debug_gdbserver:
+ print(double_line_delim)
+ print("Spawned GDB server with PID: %d" % cls.vpp.pid)
+ elif cls.debug_gdb:
+ print(double_line_delim)
+ print("Spawned VPP with PID: %d" % cls.vpp.pid)
+ else:
+ cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
+ return
+ print(single_line_delim)
+ print("You can debug the VPP using e.g.:")
+ if cls.debug_gdbserver:
+ print("gdb " + cls.vpp_bin + " -ex 'target remote localhost:7777'")
+ print("Now is the time to attach a gdb by running the above "
+ "command, set up breakpoints etc. and then resume VPP from "
+ "within gdb by issuing the 'continue' command")
+ elif cls.debug_gdb:
+ print("gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
+ print("Now is the time to attach a gdb by running the above "
+ "command and set up breakpoints etc.")
+ print(single_line_delim)
+ raw_input("Press ENTER to continue running the testcase...")
+
+ @classmethod
+ def run_vpp(cls):
+ cmdline = cls.vpp_cmdline
+
+ if cls.debug_gdbserver:
+ gdbserver = '/usr/bin/gdbserver'
+ if not os.path.isfile(gdbserver) or \
+ not os.access(gdbserver, os.X_OK):
+ raise Exception("gdbserver binary '%s' does not exist or is "
+ "not executable" % gdbserver)
+
+ cmdline = [gdbserver, 'localhost:7777'] + cls.vpp_cmdline
+ cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
+
+ try:
+ cls.vpp = subprocess.Popen(cmdline,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ bufsize=1)
+ except Exception as e:
+ cls.logger.critical("Couldn't start vpp: %s" % e)
+ raise
+
+ cls.wait_for_enter()
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform class setup before running the testcase
+ Remove shared memory files, start vpp and connect the vpp-api
+ """
+ cls.logger = getLogger(cls.__name__)
+ cls.tempdir = tempfile.mkdtemp(
+ prefix='vpp-unittest-' + cls.__name__ + '-')
+ cls.shm_prefix = cls.tempdir.split("/")[-1]
+ os.chdir(cls.tempdir)
+ cls.logger.info("Temporary dir is %s, shm prefix is %s",
+ cls.tempdir, cls.shm_prefix)
+ cls.setUpConstants()
+ cls._captures = []
+ cls._zombie_captures = []
+ cls.packet_infos = {}
+ cls.verbose = 0
+ cls.vpp_dead = False
+ print(double_line_delim)
+ print(colorize(getdoc(cls).splitlines()[0], YELLOW))
+ print(double_line_delim)
+ # need to catch exceptions here because if we raise, then the cleanup
+ # doesn't get called and we might end with a zombie vpp
+ try:
+ cls.run_vpp()
+ cls.vpp_stdout_deque = deque()
+ cls.vpp_stdout_reader_thread = Thread(target=pump_output, args=(
+ cls.vpp.stdout, cls.vpp_stdout_deque))
+ cls.vpp_stdout_reader_thread.start()
+ cls.vpp_stderr_deque = deque()
+ cls.vpp_stderr_reader_thread = Thread(target=pump_output, args=(
+ cls.vpp.stderr, cls.vpp_stderr_deque))
+ cls.vpp_stderr_reader_thread.start()
+ cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls)
+ if cls.step:
+ hook = StepHook(cls)
+ else:
+ hook = PollHook(cls)
+ cls.vapi.register_hook(hook)
+ time.sleep(0.1)
+ hook.poll_vpp()
+ try:
+ cls.vapi.connect()
+ except:
+ if cls.debug_gdbserver:
+ print(colorize("You're running VPP inside gdbserver but "
+ "VPP-API connection failed, did you forget "
+ "to 'continue' VPP from within gdb?", RED))
+ raise
+ except:
+ t, v, tb = sys.exc_info()
+ try:
+ cls.quit()
+ except:
+ pass
+ raise t, v, tb
+
+ @classmethod
+ def quit(cls):
+ """
+ Disconnect vpp-api, kill vpp and cleanup shared memory files
+ """
+ if (cls.debug_gdbserver or cls.debug_gdb) and hasattr(cls, 'vpp'):
+ cls.vpp.poll()
+ if cls.vpp.returncode is None:
+ print(double_line_delim)
+ print("VPP or GDB server is still running")
+ print(single_line_delim)
+ raw_input("When done debugging, press ENTER to kill the process"
+ " and finish running the testcase...")
+
+ if hasattr(cls, 'vpp'):
+ if hasattr(cls, 'vapi'):
+ cls.vapi.disconnect()
+ cls.vpp.poll()
+ if cls.vpp.returncode is None:
+ cls.vpp.terminate()
+ del cls.vpp
+
+ if hasattr(cls, 'vpp_stdout_deque'):
+ cls.logger.info(single_line_delim)
+ cls.logger.info('VPP output to stdout while running %s:',
+ cls.__name__)
+ cls.logger.info(single_line_delim)
+ f = open(cls.tempdir + '/vpp_stdout.txt', 'w')
+ vpp_output = "".join(cls.vpp_stdout_deque)
+ f.write(vpp_output)
+ cls.logger.info('\n%s', vpp_output)
+ cls.logger.info(single_line_delim)
+
+ if hasattr(cls, 'vpp_stderr_deque'):
+ cls.logger.info(single_line_delim)
+ cls.logger.info('VPP output to stderr while running %s:',
+ cls.__name__)
+ cls.logger.info(single_line_delim)
+ f = open(cls.tempdir + '/vpp_stderr.txt', 'w')
+ vpp_output = "".join(cls.vpp_stderr_deque)
+ f.write(vpp_output)
+ cls.logger.info('\n%s', vpp_output)
+ cls.logger.info(single_line_delim)
+
+ @classmethod
+ def tearDownClass(cls):
+ """ Perform final cleanup after running all tests in this test-case """
+ cls.quit()
+
+ def tearDown(self):
+ """ Show various debug prints after each test """
+ if not self.vpp_dead:
+ self.logger.debug(self.vapi.cli("show trace"))
+ self.logger.info(self.vapi.ppcli("show int"))
+ self.logger.info(self.vapi.ppcli("show hardware"))
+ self.logger.info(self.vapi.ppcli("show error"))
+ self.logger.info(self.vapi.ppcli("show run"))
+
+ def setUp(self):
+ """ Clear trace before running each test"""
+ if self.vpp_dead:
+ raise Exception("VPP is dead when setting up the test")
+ time.sleep(.1)
+ self.vpp_stdout_deque.append(
+ "--- test setUp() for %s.%s(%s) starts here ---\n" %
+ (self.__class__.__name__, self._testMethodName,
+ self._testMethodDoc))
+ self.vpp_stderr_deque.append(
+ "--- test setUp() for %s.%s(%s) starts here ---\n" %
+ (self.__class__.__name__, self._testMethodName,
+ self._testMethodDoc))
+ self.vapi.cli("clear trace")
+ # store the test instance inside the test class - so that objects
+ # holding the class can access instance methods (like assertEqual)
+ type(self).test_instance = self
+
+ @classmethod
+ def pg_enable_capture(cls, interfaces):
+ """
+ Enable capture on packet-generator interfaces
+
+ :param interfaces: iterable interface indexes
+
+ """
+ for i in interfaces:
+ i.enable_capture()
+
+ @classmethod
+ def register_capture(cls, cap_name):
+ """ Register a capture in the testclass """
+ # add to the list of captures with current timestamp
+ cls._captures.append((time.time(), cap_name))
+ # filter out from zombies
+ cls._zombie_captures = [(stamp, name)
+ for (stamp, name) in cls._zombie_captures
+ if name != cap_name]
+
+ @classmethod
+ def pg_start(cls):
+ """ Remove any zombie captures and enable the packet generator """
+ # how long before capture is allowed to be deleted - otherwise vpp
+ # crashes - 100ms seems enough (this shouldn't be needed at all)
+ capture_ttl = 0.1
+ now = time.time()
+ for stamp, cap_name in cls._zombie_captures:
+ wait = stamp + capture_ttl - now
+ if wait > 0:
+ cls.logger.debug("Waiting for %ss before deleting capture %s",
+ wait, cap_name)
+ time.sleep(wait)
+ now = time.time()
+ cls.logger.debug("Removing zombie capture %s" % cap_name)
+ cls.vapi.cli('packet-generator delete %s' % cap_name)
+
+ cls.vapi.cli("trace add pg-input 50") # 50 is maximum
+ cls.vapi.cli('packet-generator enable')
+ cls._zombie_captures = cls._captures
+ cls._captures = []
+
+ @classmethod
+ def create_pg_interfaces(cls, interfaces):
+ """
+ Create packet-generator interfaces
+
+ :param interfaces: iterable indexes of the interfaces
+
+ """
+ result = []
+ for i in interfaces:
+ intf = VppPGInterface(cls, i)
+ setattr(cls, intf.name, intf)
+ result.append(intf)
+ cls.pg_interfaces = result
+ return result
+
+ @classmethod
+ def create_loopback_interfaces(cls, interfaces):
+ """
+ Create loopback interfaces
+
+ :param interfaces: iterable indexes of the interfaces
+
+ """
+ result = []
+ for i in interfaces:
+ intf = VppLoInterface(cls, i)
+ setattr(cls, intf.name, intf)
+ result.append(intf)
+ cls.lo_interfaces = result
+ return result
+
+ @staticmethod
+ def extend_packet(packet, size):
+ """
+ Extend packet to given size by padding with spaces
+ NOTE: Currently works only when Raw layer is present.
+
+ :param packet: packet
+ :param size: target size
+
+ """
+ packet_len = len(packet) + 4
+ extend = size - packet_len
+ if extend > 0:
+ packet[Raw].load += ' ' * extend
+
+ def add_packet_info_to_list(self, info):
+ """
+ Add packet info to the testcase's packet info list
+
+ :param info: packet info
+
+ """
+ info.index = len(self.packet_infos)
+ self.packet_infos[info.index] = info
+
+ def create_packet_info(self, src_pg_index, dst_pg_index):
+ """
+ Create packet info object containing the source and destination indexes
+ and add it to the testcase's packet info list
+
+ :param src_pg_index: source packet-generator index
+ :param dst_pg_index: destination packet-generator index
+
+ :returns: _PacketInfo object
+
+ """
+ info = _PacketInfo()
+ self.add_packet_info_to_list(info)
+ info.src = src_pg_index
+ info.dst = dst_pg_index
+ return info
+
+ @staticmethod
+ def info_to_payload(info):
+ """
+ Convert _PacketInfo object to packet payload
+
+ :param info: _PacketInfo object
+
+ :returns: string containing serialized data from packet info
+ """
+ return "%d %d %d" % (info.index, info.src, info.dst)
+
+ @staticmethod
+ def payload_to_info(payload):
+ """
+ Convert packet payload to _PacketInfo object
+
+ :param payload: packet payload
+
+ :returns: _PacketInfo object containing de-serialized data from payload
+
+ """
+ numbers = payload.split()
+ info = _PacketInfo()
+ info.index = int(numbers[0])
+ info.src = int(numbers[1])
+ info.dst = int(numbers[2])
+ return info
+
+ def get_next_packet_info(self, info):
+ """
+ Iterate over the packet info list stored in the testcase
+ Start iteration with first element if info is None
+ Continue based on index in info if info is specified
+
+ :param info: info or None
+ :returns: next info in list or None if no more infos
+ """
+ if info is None:
+ next_index = 0
+ else:
+ next_index = info.index + 1
+ if next_index == len(self.packet_infos):
+ return None
+ else:
+ return self.packet_infos[next_index]
+
+ def get_next_packet_info_for_interface(self, src_index, info):
+ """
+ Search the packet info list for the next packet info with same source
+ interface index
+
+ :param src_index: source interface index to search for
+ :param info: packet info - where to start the search
+ :returns: packet info or None
+
+ """
+ while True:
+ info = self.get_next_packet_info(info)
+ if info is None:
+ return None
+ if info.src == src_index:
+ return info
+
+ def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
+ """
+ Search the packet info list for the next packet info with same source
+ and destination interface indexes
+
+ :param src_index: source interface index to search for
+ :param dst_index: destination interface index to search for
+ :param info: packet info - where to start the search
+ :returns: packet info or None
+
+ """
+ while True:
+ info = self.get_next_packet_info_for_interface(src_index, info)
+ if info is None:
+ return None
+ if info.dst == dst_index:
+ return info
+
+ def assert_equal(self, real_value, expected_value, name_or_class=None):
+ if name_or_class is None:
+            self.assertEqual(real_value, expected_value)
+ return
+ try:
+ msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
+ msg = msg % (getdoc(name_or_class).strip(),
+ real_value, str(name_or_class(real_value)),
+ expected_value, str(name_or_class(expected_value)))
+ except:
+ msg = "Invalid %s: %s does not match expected value %s" % (
+ name_or_class, real_value, expected_value)
+
+ self.assertEqual(real_value, expected_value, msg)
+
+ def assert_in_range(
+ self,
+ real_value,
+ expected_min,
+ expected_max,
+ name=None):
+ if name is None:
+ msg = None
+ else:
+ msg = "Invalid %s: %s out of range <%s,%s>" % (
+ name, real_value, expected_min, expected_max)
+ self.assertTrue(expected_min <= real_value <= expected_max, msg)
+
+
+class VppTestResult(unittest.TestResult):
+ """
+ @property result_string
+ String variable to store the test case result string.
+ @property errors
+ List variable containing 2-tuples of TestCase instances and strings
+ holding formatted tracebacks. Each tuple represents a test which
+ raised an unexpected exception.
+ @property failures
+ List variable containing 2-tuples of TestCase instances and strings
+ holding formatted tracebacks. Each tuple represents a test where
+ a failure was explicitly signalled using the TestCase.assert*()
+ methods.
+ """
+
+ def __init__(self, stream, descriptions, verbosity):
+ """
+        :param stream: File descriptor used to report test results. Set
+            to the standard error stream by default.
+        :param descriptions: Boolean controlling whether to use test
+            case descriptions.
+        :param verbosity: Integer holding the required verbosity level.
+ """
+ unittest.TestResult.__init__(self, stream, descriptions, verbosity)
+ self.stream = stream
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self.result_string = None
+
+ def addSuccess(self, test):
+ """
+ Record a test succeeded result
+
+ :param test:
+
+ """
+ unittest.TestResult.addSuccess(self, test)
+ self.result_string = colorize("OK", GREEN)
+
+ def addSkip(self, test, reason):
+ """
+ Record a test skipped.
+
+ :param test:
+ :param reason:
+
+ """
+ unittest.TestResult.addSkip(self, test, reason)
+ self.result_string = colorize("SKIP", YELLOW)
+
+ def addFailure(self, test, err):
+ """
+ Record a test failed result
+
+ :param test:
+ :param err: error message
+
+ """
+ unittest.TestResult.addFailure(self, test, err)
+ if hasattr(test, 'tempdir'):
+ self.result_string = colorize("FAIL", RED) + \
+ ' [ temp dir used by test case: ' + test.tempdir + ' ]'
+ else:
+ self.result_string = colorize("FAIL", RED) + ' [no temp dir]'
+
+ def addError(self, test, err):
+ """
+ Record a test error result
+
+ :param test:
+ :param err: error message
+
+ """
+ unittest.TestResult.addError(self, test, err)
+ if hasattr(test, 'tempdir'):
+ self.result_string = colorize("ERROR", RED) + \
+ ' [ temp dir used by test case: ' + test.tempdir + ' ]'
+ else:
+ self.result_string = colorize("ERROR", RED) + ' [no temp dir]'
+
+ def getDescription(self, test):
+ """
+ Get test description
+
+ :param test:
+ :returns: test description
+
+ """
+ # TODO: if none print warning not raise exception
+ short_description = test.shortDescription()
+ if self.descriptions and short_description:
+ return short_description
+ else:
+ return str(test)
+
+ def startTest(self, test):
+ """
+ Start a test
+
+ :param test:
+
+ """
+ unittest.TestResult.startTest(self, test)
+ if self.verbosity > 0:
+ self.stream.writeln(
+ "Starting " + self.getDescription(test) + " ...")
+ self.stream.writeln(single_line_delim)
+
+ def stopTest(self, test):
+ """
+ Stop a test
+
+ :param test:
+
+ """
+ unittest.TestResult.stopTest(self, test)
+ if self.verbosity > 0:
+ self.stream.writeln(single_line_delim)
+ self.stream.writeln("%-60s%s" %
+ (self.getDescription(test), self.result_string))
+ self.stream.writeln(single_line_delim)
+ else:
+ self.stream.writeln("%-60s%s" %
+ (self.getDescription(test), self.result_string))
+
+ def printErrors(self):
+ """
+ Print errors from running the test case
+ """
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavour, errors):
+ """
+ Print error list to the output stream together with error type
+ and test case description.
+
+ :param flavour: error type
+ :param errors: iterable errors
+
+ """
+ for test, err in errors:
+ self.stream.writeln(double_line_delim)
+ self.stream.writeln("%s: %s" %
+ (flavour, self.getDescription(test)))
+ self.stream.writeln(single_line_delim)
+ self.stream.writeln("%s" % err)
+
+
+class VppTestRunner(unittest.TextTestRunner):
+ """
+ A basic test runner implementation which prints results on standard error.
+ """
+ @property
+ def resultclass(self):
+ """Class maintaining the results of the tests"""
+ return VppTestResult
+
+ def run(self, test):
+ """
+ Run the tests
+
+ :param test:
+
+ """
+ print("Running tests using custom test runner") # debug message
+ return super(VppTestRunner, self).run(test)
diff --git a/vpp/test/hook.py b/vpp/test/hook.py
new file mode 100644
index 00000000..f3e5f880
--- /dev/null
+++ b/vpp/test/hook.py
@@ -0,0 +1,227 @@
+import signal
+import os
+import pexpect
+import traceback
+from log import *
+
+
+class Hook(object):
+ """
+ Generic hooks before/after API/CLI calls
+ """
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def before_api(self, api_name, api_args):
+ """
+ Function called before API call
+ Emit a debug message describing the API name and arguments
+
+ @param api_name: name of the API
+ @param api_args: tuple containing the API arguments
+ """
+ self.logger.debug("API: %s (%s)" %
+ (api_name, api_args), extra={'color': RED})
+
+ def after_api(self, api_name, api_args):
+ """
+ Function called after API call
+
+ @param api_name: name of the API
+ @param api_args: tuple containing the API arguments
+ """
+ pass
+
+ def before_cli(self, cli):
+ """
+ Function called before CLI call
+ Emit a debug message describing the CLI
+
+ @param cli: CLI string
+ """
+ self.logger.debug("CLI: %s" % (cli), extra={'color': RED})
+
+ def after_cli(self, cli):
+ """
+ Function called after CLI call
+ """
+ pass
+
+
+class VppDiedError(Exception):
+ pass
+
+
+class PollHook(Hook):
+ """ Hook which checks if the vpp subprocess is alive """
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+ self.logger = testcase.logger
+
+ def spawn_gdb(self, gdb_path, core_path):
+ gdb_cmdline = gdb_path + ' ' + self.testcase.vpp_bin + ' ' + core_path
+ gdb = pexpect.spawn(gdb_cmdline)
+ gdb.interact()
+ try:
+ gdb.terminate(True)
+ except:
+ pass
+ if gdb.isalive():
+ raise Exception("GDB refused to die...")
+
+ def on_crash(self, core_path):
+ if self.testcase.debug_core:
+ gdb_path = '/usr/bin/gdb'
+ if os.path.isfile(gdb_path) and os.access(gdb_path, os.X_OK):
+ # automatically attach gdb
+ self.spawn_gdb(gdb_path, core_path)
+ return
+ else:
+ self.logger.error(
+ "Debugger '%s' does not exist or is not an executable.." %
+ gdb_path)
+
+ self.logger.critical('core file present, debug with: gdb ' +
+ self.testcase.vpp_bin + ' ' + core_path)
+
+ def poll_vpp(self):
+ """
+ Poll the vpp status and throw an exception if it's not running
+ :raises VppDiedError: exception if VPP is not running anymore
+ """
+ if self.testcase.vpp_dead:
+ # already dead, nothing to do
+ return
+
+ self.testcase.vpp.poll()
+ if self.testcase.vpp.returncode is not None:
+ signaldict = dict(
+ (k, v) for v, k in reversed(sorted(signal.__dict__.items()))
+ if v.startswith('SIG') and not v.startswith('SIG_'))
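+            # signaldict maps signal numbers to names; e.g. a subprocess
+            # killed by SIGSEGV reports returncode -11, which abs() maps
+            # to 'SIGSEGV' below (illustrative note).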
+
+            if abs(self.testcase.vpp.returncode) in signaldict:
+ s = signaldict[abs(self.testcase.vpp.returncode)]
+ else:
+ s = "unknown"
+ msg = "VPP subprocess died unexpectedly with returncode %d [%s]" % (
+ self.testcase.vpp.returncode, s)
+ self.logger.critical(msg)
+ core_path = self.testcase.tempdir + '/core'
+ if os.path.isfile(core_path):
+ self.on_crash(core_path)
+ self.testcase.vpp_dead = True
+ raise VppDiedError(msg)
+
+ def before_api(self, api_name, api_args):
+ """
+ Check if VPP died before executing an API
+
+ :param api_name: name of the API
+ :param api_args: tuple containing the API arguments
+ :raises VppDiedError: exception if VPP is not running anymore
+
+ """
+ super(PollHook, self).before_api(api_name, api_args)
+ self.poll_vpp()
+
+ def before_cli(self, cli):
+ """
+ Check if VPP died before executing a CLI
+
+ :param cli: CLI string
+ :raises Exception: exception if VPP is not running anymore
+
+ """
+ super(PollHook, self).before_cli(cli)
+ self.poll_vpp()
+
+
+class StepHook(PollHook):
+ """ Hook which requires user to press ENTER before doing any API/CLI """
+
+ def __init__(self, testcase):
+ self.skip_stack = None
+ self.skip_num = None
+ self.skip_count = 0
+ super(StepHook, self).__init__(testcase)
+
+ def skip(self):
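+        """ Return True if the current API/CLI call originates from within
+        the stack frame previously selected by the user and should therefore
+        not trigger a pause. """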
+ if self.skip_stack is None:
+ return False
+ stack = traceback.extract_stack()
+ counter = 0
+ skip = True
+ for e in stack:
+ if counter > self.skip_num:
+ break
+ if e[0] != self.skip_stack[counter][0]:
+ skip = False
+ if e[1] != self.skip_stack[counter][1]:
+ skip = False
+ counter += 1
+ if skip:
+ self.skip_count += 1
+ return True
+ else:
+ print("%d API/CLI calls skipped in specified stack "
+ "frame" % self.skip_count)
+ self.skip_count = 0
+ self.skip_stack = None
+ self.skip_num = None
+ return False
+
+ def user_input(self):
+ print('number\tfunction\tfile\tcode')
+ counter = 0
+ stack = traceback.extract_stack()
+ for e in stack:
+ print('%02d.\t%s\t%s:%d\t[%s]' % (counter, e[2], e[0], e[1], e[3]))
+ counter += 1
+ print(single_line_delim)
+ print("You can enter a number of stack frame chosen from above")
+ print("Calls in/below that stack frame will be not be stepped anymore")
+ print(single_line_delim)
+ while True:
+ choice = raw_input("Enter your choice, if any, and press ENTER to "
+ "continue running the testcase...")
+ if choice == "":
+ choice = None
+ try:
+ if choice is not None:
+ num = int(choice)
+ except:
+ print("Invalid input")
+ continue
+ if choice is not None and (num < 0 or num >= len(stack)):
+ print("Invalid choice")
+ continue
+ break
+ if choice is not None:
+ self.skip_stack = stack
+ self.skip_num = num
+
+ def before_cli(self, cli):
+ """ Wait for ENTER before executing CLI """
+ if self.skip():
+ print("Skip pause before executing CLI: %s" % cli)
+ else:
+ print(double_line_delim)
+ print("Test paused before executing CLI: %s" % cli)
+ print(single_line_delim)
+ self.user_input()
+ super(StepHook, self).before_cli(cli)
+
+ def before_api(self, api_name, api_args):
+ """ Wait for ENTER before executing API """
+ if self.skip():
+ print("Skip pause before executing API: %s (%s)"
+ % (api_name, api_args))
+ else:
+ print(double_line_delim)
+ print("Test paused before executing API: %s (%s)"
+ % (api_name, api_args))
+ print(single_line_delim)
+ self.user_input()
+ super(StepHook, self).before_api(api_name, api_args)
diff --git a/vpp/test/log.py b/vpp/test/log.py
new file mode 100644
index 00000000..5a6219ce
--- /dev/null
+++ b/vpp/test/log.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import logging
+
+""" @var formatting delimiter consisting of '=' characters """
+double_line_delim = '=' * 70
+""" @var formatting delimiter consisting of '-' characters """
+single_line_delim = '-' * 70
+
+
+def colorize(msg, color):
+ return color + msg + COLOR_RESET
+
+
+class ColorFormatter(logging.Formatter):
+
+    def __init__(self, fmt=None, datefmt=None):
+ super(ColorFormatter, self).__init__(fmt, datefmt)
+
+ def format(self, record):
+ message = super(ColorFormatter, self).format(record)
+ if hasattr(record, 'color'):
+ message = colorize(message, record.color)
+ return message
+
+handler = logging.StreamHandler(sys.stdout)
+handler.setFormatter(ColorFormatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
+ datefmt="%H:%M:%S"))
+
+global_logger = logging.getLogger()
+global_logger.addHandler(handler)
+try:
+ verbose = int(os.getenv("V", 0))
+except:
+ verbose = 0
+
+# 40 = ERROR, 30 = WARNING, 20 = INFO, 10 = DEBUG, 0 = NOTSET (all messages)
+if verbose >= 2:
+ log_level = 10
+elif verbose == 1:
+ log_level = 20
+else:
+ log_level = 40
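+# e.g. running the tests with V=1 in the environment selects INFO-level
+# logging, while V=2 or higher selects DEBUG (usage note).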
+
+scapy_logger = logging.getLogger("scapy.runtime")
+scapy_logger.setLevel(logging.ERROR)
+
+
+def getLogger(name):
+ logger = logging.getLogger(name)
+ logger.setLevel(log_level)
+ return logger
+
+# Static variables to store color formatting strings.
+#
+# These variables (RED, GREEN, YELLOW and LPURPLE) are used to configure
+# the color of the text to be printed in the terminal. Variable COLOR_RESET
+# is used to revert the text color to the default one.
+if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+ RED = '\033[91m'
+ GREEN = '\033[92m'
+ YELLOW = '\033[93m'
+ LPURPLE = '\033[94m'
+ COLOR_RESET = '\033[0m'
+else:
+ RED = ''
+ GREEN = ''
+ YELLOW = ''
+ LPURPLE = ''
+ COLOR_RESET = ''
diff --git a/vpp/test/patches/scapy-2.3.3/gre-layers.patch b/vpp/test/patches/scapy-2.3.3/gre-layers.patch
new file mode 100644
index 00000000..605a705b
--- /dev/null
+++ b/vpp/test/patches/scapy-2.3.3/gre-layers.patch
@@ -0,0 +1,25 @@
+diff --git a/scapy/layers/inet6.py b/scapy/layers/inet6.py
+index 03b80ec..a7e1e0f 100644
+--- a/scapy/layers/inet6.py
++++ b/scapy/layers/inet6.py
+@@ -3722,6 +3722,7 @@ conf.l2types.register(31, IPv6)
+
+ bind_layers(Ether, IPv6, type = 0x86dd )
+ bind_layers(CookedLinux, IPv6, proto = 0x86dd )
++bind_layers(GRE, IPv6, proto = 0x86dd )
+ bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
+ bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
+ bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
+diff --git a/scapy/layers/l2.py b/scapy/layers/l2.py
+index 4f491d2..661a5da 100644
+--- a/scapy/layers/l2.py
++++ b/scapy/layers/l2.py
+@@ -628,7 +628,7 @@ bind_layers( CookedLinux, EAPOL, proto=34958)
+ bind_layers( GRE, LLC, proto=122)
+ bind_layers( GRE, Dot1Q, proto=33024)
+ bind_layers( GRE, Dot1AD, type=0x88a8)
+-bind_layers( GRE, Ether, proto=1)
++bind_layers( GRE, Ether, proto=0x6558)
+ bind_layers( GRE, ARP, proto=2054)
+ bind_layers( GRE, EAPOL, proto=34958)
+ bind_layers( GRE, GRErouting, { "routing_present" : 1 } )
diff --git a/vpp/test/patches/scapy-2.3.3/mpls.py.patch b/vpp/test/patches/scapy-2.3.3/mpls.py.patch
new file mode 100644
index 00000000..5c819110
--- /dev/null
+++ b/vpp/test/patches/scapy-2.3.3/mpls.py.patch
@@ -0,0 +1,13 @@
+diff --git a/scapy/contrib/mpls.py b/scapy/contrib/mpls.py
+index 640a0c5..6af1d4a 100644
+--- a/scapy/contrib/mpls.py
++++ b/scapy/contrib/mpls.py
+@@ -18,6 +18,8 @@ class MPLS(Packet):
+
+ def guess_payload_class(self, payload):
+ if len(payload) >= 1:
++ if not self.s:
++ return MPLS
+ ip_version = (ord(payload[0]) >> 4) & 0xF
+ if ip_version == 4:
+ return IP
diff --git a/vpp/test/run_tests.py b/vpp/test/run_tests.py
new file mode 100644
index 00000000..8f2174b1
--- /dev/null
+++ b/vpp/test/run_tests.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+import os
+import unittest
+from framework import VppTestRunner
+
+if __name__ == '__main__':
+ try:
+ verbose = int(os.getenv("V", 0))
+ except:
+ verbose = 0
+ unittest.main(testRunner=VppTestRunner, module=None, verbosity=verbose)
diff --git a/vpp/test/scapy_handlers/__init__.py b/vpp/test/scapy_handlers/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/vpp/test/scapy_handlers/__init__.py
diff --git a/vpp/test/template_bd.py b/vpp/test/template_bd.py
new file mode 100644
index 00000000..e240a280
--- /dev/null
+++ b/vpp/test/template_bd.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+
+from abc import abstractmethod, ABCMeta
+
+from scapy.layers.l2 import Ether, Raw
+from scapy.layers.inet import IP, UDP
+
+
+class BridgeDomain(object):
+ """ Bridge domain abstraction """
+ __metaclass__ = ABCMeta
+
+ @property
+ def frame_request(self):
+ """ Ethernet frame modeling a generic request """
+ return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
+ IP(src='1.2.3.4', dst='4.3.2.1') /
+ UDP(sport=10000, dport=20000) /
+ Raw('\xa5' * 100))
+
+ @property
+ def frame_reply(self):
+ """ Ethernet frame modeling a generic reply """
+ return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw('\xa5' * 100))
+
+ @abstractmethod
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """ Encapsulate mcast packet """
+ pass
+
+ @abstractmethod
+ def encapsulate(self, pkt, vni):
+ """ Encapsulate packet """
+ pass
+
+ @abstractmethod
+ def decapsulate(self, pkt):
+ """ Decapsulate packet """
+ pass
+
+ @abstractmethod
+ def check_encapsulation(self, pkt, vni, local_only=False):
+ """ Verify the encapsulation """
+ pass
+
+ def assert_eq_pkts(self, pkt1, pkt2):
+ """ Verify the Ether, IP, UDP, payload are equal in both
+ packets
+ """
+ self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
+ self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
+ self.assertEqual(pkt1[IP].src, pkt2[IP].src)
+ self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
+ self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
+ self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
+ self.assertEqual(pkt1[Raw], pkt2[Raw])
+
+ def test_decap(self):
+ """ Decapsulation test
+ Send encapsulated frames from pg0
+ Verify receipt of decapsulated frames on pg1
+ """
+
+ encapsulated_pkt = self.encapsulate(self.frame_request,
+ self.single_tunnel_bd)
+
+ self.pg0.add_stream([encapsulated_pkt, ])
+
+ self.pg1.enable_capture()
+
+ self.pg_start()
+
+ # Pick first received frame and check if it's the non-encapsulated frame
+ out = self.pg1.get_capture()
+ self.assertEqual(len(out), 1,
+ 'Invalid number of packets on '
+ 'output: {}'.format(len(out)))
+ pkt = out[0]
+ self.assert_eq_pkts(pkt, self.frame_request)
+
+ def test_encap(self):
+ """ Encapsulation test
+ Send frames from pg1
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg1.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+        # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture()
+ self.assertEqual(len(out), 1,
+ 'Invalid number of packets on '
+ 'output: {}'.format(len(out)))
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.single_tunnel_bd)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_ucast_flood(self):
+ """ Unicast flood test
+ Send frames from pg3
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg3.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+        # Get packet from each tunnel and assert it's correctly encapsulated.
+ out = self.pg0.get_capture()
+ self.assertEqual(len(out), 10,
+ 'Invalid number of packets on '
+ 'output: {}'.format(len(out)))
+ for pkt in out:
+ self.check_encapsulation(pkt, self.ucast_flood_bd, True)
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ def test_mcast_flood(self):
+ """ Multicast flood test
+ Send frames from pg2
+ Verify receipt of encapsulated frames on pg0
+ """
+ self.pg2.add_stream([self.frame_reply])
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
+        # Pick first received frame and check if it's correctly encapsulated.
+ out = self.pg0.get_capture()
+ self.assertEqual(len(out), 1,
+ 'Invalid number of packets on '
+ 'output: {}'.format(len(out)))
+ pkt = out[0]
+ self.check_encapsulation(pkt, self.mcast_flood_bd, True)
+
+ payload = self.decapsulate(pkt)
+ self.assert_eq_pkts(payload, self.frame_reply)
+
+ @staticmethod
+ def ipn_to_ip(ipn):
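+        # Convert a network-order packed IPv4 address to dotted-quad text,
+        # e.g. ipn_to_ip('\x0a\x00\x00\x01') -> '10.0.0.1' (illustrative).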
+ return '.'.join(str(i) for i in bytearray(ipn))
+
+ def test_mcast_rcv(self):
+ """ Multicast receive test
+        Send 20 encapsulated frames from pg0; only 10 match unicast tunnels
+        Verify receipt of 10 decapsulated frames on pg2
+ """
+ mac = self.pg0.remote_mac
+ ip_range_start = 10
+ ip_range_end = 30
+ mcast_stream = [
+ self.encap_mcast(self.frame_request, self.ipn_to_ip(ip), mac,
+ self.mcast_flood_bd)
+ for ip in self.ip4_range(self.pg0.remote_ip4n,
+ ip_range_start, ip_range_end)]
+ self.pg0.add_stream(mcast_stream)
+ self.pg2.enable_capture()
+ self.pg_start()
+ out = self.pg2.get_capture()
+ self.assertEqual(len(out), 10,
+ 'Invalid number of packets on '
+ 'output: {}'.format(len(out)))
+ for pkt in out:
+ self.assert_eq_pkts(pkt, self.frame_request)
diff --git a/vpp/test/test_bfd.py b/vpp/test/test_bfd.py
new file mode 100644
index 00000000..87a5ea4b
--- /dev/null
+++ b/vpp/test/test_bfd.py
@@ -0,0 +1,275 @@
+#!/usr/bin/env python
+
+import unittest
+import time
+from random import randint
+from bfd import *
+from framework import *
+from util import ppp
+
+
+class BFDAPITestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD) - API"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDAPITestCase, cls).setUpClass()
+
+ try:
+ cls.create_pg_interfaces([0])
+ cls.pg0.config_ip4()
+ cls.pg0.resolve_arp()
+
+ except Exception:
+ super(BFDAPITestCase, cls).tearDownClass()
+ raise
+
+ def test_add_bfd(self):
+ """ create a BFD session """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s" % str(session.state))
+ session.remove_vpp_config()
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ self.logger.debug("Session state is %s" % str(session.state))
+ session.remove_vpp_config()
+
+ def test_double_add(self):
+ """ create the same BFD session twice (negative case) """
+ session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ session.add_vpp_config()
+ try:
+ session.add_vpp_config()
+ except:
+ session.remove_vpp_config()
+ return
+ session.remove_vpp_config()
+ raise Exception("Expected failure while adding duplicate "
+ "configuration")
+
+
+def create_packet(interface, ttl=255, src_port=50000, **kwargs):
+ p = (Ether(src=interface.remote_mac, dst=interface.local_mac) /
+ IP(src=interface.remote_ip4, dst=interface.local_ip4, ttl=ttl) /
+ UDP(sport=src_port, dport=BFD.udp_dport) /
+         BFD(**kwargs))
+ return p
+
+
+def verify_ip(test, packet, local_ip, remote_ip):
+ """ Verify correctness of IP layer. """
+ ip = packet[IP]
+ test.assert_equal(ip.src, local_ip, "IP source address")
+ test.assert_equal(ip.dst, remote_ip, "IP destination address")
+ test.assert_equal(ip.ttl, 255, "IP TTL")
+
+
+def verify_udp(test, packet):
+ """ Verify correctness of UDP layer. """
+ udp = packet[UDP]
+ test.assert_equal(udp.dport, BFD.udp_dport, "UDP destination port")
+ test.assert_in_range(udp.sport, BFD.udp_sport_min, BFD.udp_sport_max,
+ "UDP source port")
+
+
+class BFDTestSession(object):
+
+ def __init__(self, test, interface, detect_mult=3):
+ self.test = test
+ self.interface = interface
+ self.bfd_values = {
+ 'my_discriminator': 0,
+ 'desired_min_tx_interval': 100000,
+ 'detect_mult': detect_mult,
+ 'diag': BFDDiagCode.no_diagnostic,
+ }
+
+ def update(self, **kwargs):
+ self.bfd_values.update(kwargs)
+
+ def create_packet(self):
+ packet = create_packet(self.interface)
+ for name, value in self.bfd_values.iteritems():
+ packet[BFD].setfieldval(name, value)
+ return packet
+
+ def send_packet(self):
+ p = self.create_packet()
+ self.test.logger.debug(ppp("Sending packet:", p))
+ self.test.pg0.add_stream([p])
+ self.test.pg_start()
+
+ def verify_packet(self, packet):
+ """ Verify correctness of BFD layer. """
+ bfd = packet[BFD]
+ self.test.assert_equal(bfd.version, 1, "BFD version")
+ self.test.assert_equal(bfd.your_discriminator,
+ self.bfd_values['my_discriminator'],
+ "BFD - your discriminator")
+
+
+class BFDTestCase(VppTestCase):
+ """Bidirectional Forwarding Detection (BFD)"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(BFDTestCase, cls).setUpClass()
+ try:
+ cls.create_pg_interfaces([0])
+ cls.pg0.config_ip4()
+ cls.pg0.generate_remote_hosts()
+ cls.pg0.configure_ipv4_neighbors()
+ cls.pg0.admin_up()
+ cls.pg0.resolve_arp()
+
+ except Exception:
+ super(BFDTestCase, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ super(BFDTestCase, self).setUp()
+ self.vapi.want_bfd_events()
+ self.vpp_session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4)
+ self.vpp_session.add_vpp_config()
+ self.vpp_session.admin_up()
+ self.test_session = BFDTestSession(self, self.pg0)
+
+ def tearDown(self):
+ self.vapi.want_bfd_events(enable_disable=0)
+ self.vapi.collect_events() # clear the event queue
+ if not self.vpp_dead:
+ self.vpp_session.remove_vpp_config()
+ super(BFDTestCase, self).tearDown()
+
+ def verify_event(self, event, expected_state):
+ """ Verify correctness of event values. """
+ e = event
+ self.logger.debug("BFD: Event: %s" % repr(e))
+ self.assert_equal(e.bs_index, self.vpp_session.bs_index,
+ "BFD session index")
+ self.assert_equal(e.sw_if_index, self.vpp_session.interface.sw_if_index,
+ "BFD interface index")
+ is_ipv6 = 0
+ if self.vpp_session.af == AF_INET6:
+ is_ipv6 = 1
+ self.assert_equal(e.is_ipv6, is_ipv6, "is_ipv6")
+ if self.vpp_session.af == AF_INET:
+ self.assert_equal(e.local_addr[:4], self.vpp_session.local_addr_n,
+ "Local IPv4 address")
+ self.assert_equal(e.peer_addr[:4], self.vpp_session.peer_addr_n,
+ "Peer IPv4 address")
+ else:
+ self.assert_equal(e.local_addr, self.vpp_session.local_addr_n,
+ "Local IPv6 address")
+ self.assert_equal(e.peer_addr, self.vpp_session.peer_addr_n,
+ "Peer IPv6 address")
+ self.assert_equal(e.state, expected_state, BFDState)
+
+ def wait_for_bfd_packet(self, timeout=1):
+ self.logger.info("BFD: Waiting for BFD packet")
+ p = self.pg0.wait_for_packet(timeout=timeout)
+ bfd = p[BFD]
+ if bfd is None:
+ raise Exception(ppp("Unexpected or invalid BFD packet:", p))
+ if bfd.payload:
+ raise Exception(ppp("Unexpected payload in BFD packet:", bfd))
+ verify_ip(self, p, self.pg0.local_ip4, self.pg0.remote_ip4)
+ verify_udp(self, p)
+ self.test_session.verify_packet(p)
+ return p
+
+ def test_slow_timer(self):
+ """ verify slow periodic control frames while session down """
+ self.pg_enable_capture([self.pg0])
+ expected_packets = 3
+ self.logger.info("BFD: Waiting for %d BFD packets" % expected_packets)
+ self.wait_for_bfd_packet()
+ for i in range(expected_packets):
+ before = time.time()
+ self.wait_for_bfd_packet()
+ after = time.time()
+ # spec says the range should be <0.75, 1>, allow extra 0.05 margin
+ # to work around timing issues
+ self.assert_in_range(
+ after - before, 0.70, 1.05, "time between slow packets")
+ before = after
+
+ def test_zero_remote_min_rx(self):
+ """ no packets when zero BFD RemoteMinRxInterval """
+ self.pg_enable_capture([self.pg0])
+ p = self.wait_for_bfd_packet()
+ self.test_session.update(my_discriminator=randint(0, 40000000),
+ your_discriminator=p[BFD].my_discriminator,
+ state=BFDState.init,
+ required_min_rx_interval=0)
+ self.test_session.send_packet()
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
+ self.verify_event(e, expected_state=BFDState.up)
+
+ try:
+ p = self.pg0.wait_for_packet(timeout=1)
+ except:
+ return
+ raise Exception(ppp("Received unexpected BFD packet:", p))
+
+ def bfd_session_up(self):
+ self.pg_enable_capture([self.pg0])
+ self.logger.info("BFD: Waiting for slow hello")
+ p = self.wait_for_bfd_packet()
+ self.logger.info("BFD: Sending Init")
+ self.test_session.update(my_discriminator=randint(0, 40000000),
+ your_discriminator=p[BFD].my_discriminator,
+ state=BFDState.init,
+ required_min_rx_interval=100000)
+ self.test_session.send_packet()
+ self.logger.info("BFD: Waiting for event")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
+ self.verify_event(e, expected_state=BFDState.up)
+ self.logger.info("BFD: Session is Up")
+ self.test_session.update(state=BFDState.up)
+
+ def test_session_up(self):
+ """ bring BFD session up """
+ self.bfd_session_up()
+
+ def test_hold_up(self):
+ """ hold BFD session up """
+ self.bfd_session_up()
+ for i in range(5):
+ self.wait_for_bfd_packet()
+ self.test_session.send_packet()
+
+ def test_conn_down(self):
+ """ verify session goes down after inactivity """
+ self.bfd_session_up()
+ self.wait_for_bfd_packet()
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ self.wait_for_bfd_packet()
+ self.assert_equal(len(self.vapi.collect_events()), 0,
+ "number of bfd events")
+ e = self.vapi.wait_for_event(1, "bfd_udp_session_details")
+ self.verify_event(e, expected_state=BFDState.down)
+
+ def test_large_required_min_rx(self):
+ """ large remote RequiredMinRxInterval """
+ self.bfd_session_up()
+ interval = 3000000
+ self.test_session.update(required_min_rx_interval=interval)
+ self.test_session.send_packet()
+ now = time.time()
+ count = 0
+ while time.time() < now + interval / 1000000:
+ try:
+ p = self.wait_for_bfd_packet()
+ if count > 1:
+ self.logger.error(ppp("Received unexpected packet:", p))
+ count += 1
+ except:
+ pass
+ self.assert_in_range(count, 0, 1, "number of packets received")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_classifier.py b/vpp/test/test_classifier.py
new file mode 100644
index 00000000..0923387c
--- /dev/null
+++ b/vpp/test/test_classifier.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python
+
+import unittest
+import socket
+import binascii
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from util import ppp
+
+class TestClassifier(VppTestCase):
+ """ Classifier Test Case """
+
+ def setUp(self):
+ """
+ Perform test setup before test case.
+
+ **Config:**
+ - create 4 pg interfaces
+ - untagged pg0/pg1/pg2 interface
+ pg0 -------> pg1 (IP ACL)
+ \
+ ---> pg2 (MAC ACL))
+ \
+ -> pg3 (PBR)
+ - setup interfaces:
+ - put it into UP state
+ - set IPv4 addresses
+ - resolve neighbor address using ARP
+
+ :ivar list interfaces: pg interfaces.
+ :ivar list pg_if_packet_sizes: packet sizes in test.
+ :ivar dict acl_tbl_idx: ACL table index.
+ :ivar int pbr_vrfid: VRF id for PBR test.
+ """
+ super(TestClassifier, self).setUp()
+
+ # create 4 pg interfaces
+ self.create_pg_interfaces(range(4))
+
+ # packet sizes to test
+ self.pg_if_packet_sizes = [64, 9018]
+
+ self.interfaces = list(self.pg_interfaces)
+
+ # ACL & PBR vars
+ self.acl_tbl_idx = {}
+ self.pbr_vrfid = 200
+
+ # setup all interfaces
+ for intf in self.interfaces:
+ intf.admin_up()
+ intf.config_ip4()
+ intf.resolve_arp()
+
+ def tearDown(self):
+ """Run standard test teardown and acl related log."""
+ super(TestClassifier, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show classify table verbose"))
+ self.logger.info(self.vapi.cli("show ip fib"))
+
+ def config_pbr_fib_entry(self, intf):
+ """Configure fib entry to route traffic toward PBR VRF table
+
+ :param VppInterface intf: destination interface to be routed for PBR.
+
+ """
+ addr_len = 24
+ self.vapi.ip_add_del_route(intf.local_ip4n,
+ addr_len,
+ intf.remote_ip4n,
+ table_id=self.pbr_vrfid)
+
+ def create_stream(self, src_if, dst_if, packet_sizes):
+ """Create input packet stream for defined interfaces.
+
+ :param VppInterface src_if: Source Interface for packet stream.
+ :param VppInterface dst_if: Destination Interface for packet stream.
+ :param list packet_sizes: packet size to test.
+ """
+ pkts = []
+ for size in packet_sizes:
+ info = self.create_packet_info(src_if.sw_if_index,
+ dst_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
+ UDP(sport=1234, dport=5678) /
+ Raw(payload))
+ info.data = p.copy()
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, dst_if, capture):
+ """Verify captured input packet stream for defined interface.
+
+ :param VppInterface dst_if: Interface to verify captured packet stream.
+ :param list capture: Captured packet stream.
+ """
+ self.logger.info("Verifying capture on interface %s" % dst_if.name)
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+ for packet in capture:
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+ @staticmethod
+ def build_ip_mask(proto='', src_ip='', dst_ip='',
+ src_port='', dst_port=''):
+ """Build IP ACL mask data with hexstring format
+
+ :param str proto: protocol number <0-ff>
+ :param str src_ip: source ip address <0-ffffffff>
+ :param str dst_ip: destination ip address <0-ffffffff>
+ :param str src_port: source port number <0-ffff>
+ :param str dst_port: destination port number <0-ffff>
+ """
+
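+        # e.g. build_ip_mask(src_ip='ffffffff') yields
+        # '000000000000000000000000ffffffff' - 24 zero nibbles followed by
+        # an all-ones mask over the IPv4 source address field (illustrative
+        # example derived from the format string below).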
+ return ('{:0>20}{:0>12}{:0>8}{:0>12}{:0>4}'.format(proto, src_ip,
+ dst_ip, src_port, dst_port)).rstrip('0')
+
+ @staticmethod
+ def build_ip_match(proto='', src_ip='', dst_ip='',
+ src_port='', dst_port=''):
+ """Build IP ACL match data with hexstring format
+
+ :param str proto: protocol number with valid option "<0-ff>"
+ :param str src_ip: source ip address with format of "x.x.x.x"
+ :param str dst_ip: destination ip address with format of "x.x.x.x"
+ :param str src_port: source port number <0-ffff>
+ :param str dst_port: destination port number <0-ffff>
+ """
+        if src_ip:
+            src_ip = socket.inet_aton(src_ip).encode('hex')
+        if dst_ip:
+            dst_ip = socket.inet_aton(dst_ip).encode('hex')
+
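+        # e.g. build_ip_match(src_ip='10.0.0.1') yields
+        # '0000000000000000000000000a000001' (illustrative), which lines up
+        # with the mask produced by build_ip_mask(src_ip='ffffffff').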
+ return ('{:0>20}{:0>12}{:0>8}{:0>12}{:0>4}'.format(proto, src_ip,
+ dst_ip, src_port, dst_port)).rstrip('0')
+
+ @staticmethod
+ def build_mac_mask(dst_mac='', src_mac='', ether_type=''):
+ """Build MAC ACL mask data with hexstring format
+
+        :param str dst_mac: destination MAC address <0-ffffffffffff>
+        :param str src_mac: source MAC address <0-ffffffffffff>
+ :param str ether_type: ethernet type <0-ffff>
+ """
+
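+        # e.g. build_mac_mask(src_mac='ffffffffffff') yields
+        # '000000000000ffffffffffff' - zeros over the destination MAC
+        # followed by an all-ones mask over the source MAC (illustrative).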
+ return ('{:0>12}{:0>12}{:0>4}'.format(dst_mac, src_mac,
+ ether_type)).rstrip('0')
+
+ @staticmethod
+ def build_mac_match(dst_mac='', src_mac='', ether_type=''):
+ """Build MAC ACL match data with hexstring format
+
+        :param str dst_mac: destination MAC address <x:x:x:x:x:x>
+        :param str src_mac: source MAC address <x:x:x:x:x:x>
+ :param str ether_type: ethernet type <0-ffff>
+ """
+        if dst_mac:
+            dst_mac = dst_mac.replace(':', '')
+        if src_mac:
+            src_mac = src_mac.replace(':', '')
+
+ return ('{:0>12}{:0>12}{:0>4}'.format(dst_mac, src_mac,
+ ether_type)).rstrip('0')
+
+ def create_classify_table(self, key, mask, data_offset=0, is_add=1):
+ """Create Classify Table
+
+ :param str key: key for classify table (ex, ACL name).
+ :param str mask: mask value for interested traffic.
+        :param int data_offset: offset, relative to the packet's current
+            data pointer, at which matching starts.
+ :param int is_add: option to configure classify table.
+ - create(1) or delete(0)
+ """
+ r = self.vapi.classify_add_del_table(
+ is_add,
+ binascii.unhexlify(mask),
+ match_n_vectors=(len(mask)-1)//32 + 1,
+ miss_next_index=0,
+ current_data_flag=1,
+ current_data_offset=data_offset)
+ self.assertIsNotNone(r, msg='No response msg for add_del_table')
+ self.acl_tbl_idx[key] = r.new_table_index
+
+ def create_classify_session(self, intf, table_index, match,
+ pbr_option=0, vrfid=0, is_add=1):
+ """Create Classify Session
+
+ :param VppInterface intf: Interface to apply classify session.
+ :param int table_index: table index to identify classify table.
+ :param str match: matched value for interested traffic.
+        :param int pbr_option: enable/disable the PBR feature.
+ :param int vrfid: VRF id.
+ :param int is_add: option to configure classify session.
+ - create(1) or delete(0)
+ """
+ r = self.vapi.classify_add_del_session(
+ is_add,
+ table_index,
+ binascii.unhexlify(match),
+ opaque_index=0,
+ action=pbr_option,
+ metadata=vrfid)
+ self.assertIsNotNone(r, msg='No response msg for add_del_session')
+
+ def input_acl_set_interface(self, intf, table_index, is_add=1):
+ """Configure Input ACL interface
+
+ :param VppInterface intf: Interface to apply Input ACL feature.
+ :param int table_index: table index to identify classify table.
+ :param int is_add: option to configure classify session.
+ - enable(1) or disable(0)
+ """
+ r = self.vapi.input_acl_set_interface(
+ is_add,
+ intf.sw_if_index,
+ ip4_table_index=table_index)
+ self.assertIsNotNone(r, msg='No response msg for acl_set_interface')
+
+ def test_acl_ip(self):
+ """ IP ACL test
+
+ Test scenario for basic IP ACL with source IP
+ - Create IPv4 stream for pg0 -> pg1 interface.
+ - Create ACL with source IP address.
+ - Send and verify received packets on pg1 interface.
+ """
+
+ # Basic ACL testing with source IP
+ pkts = self.create_stream(self.pg0, self.pg1, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ self.create_classify_table('ip', self.build_ip_mask(src_ip='ffffffff'))
+ self.create_classify_session(self.pg0, self.acl_tbl_idx.get('ip'),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('ip'))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg1.get_capture()
+ self.verify_capture(self.pg1, pkts)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('ip'), 0)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_acl_mac(self):
+ """ MAC ACL test
+
+ Test scenario for basic MAC ACL with source MAC
+ - Create IPv4 stream for pg0 -> pg2 interface.
+ - Create ACL with source MAC address.
+ - Send and verify received packets on pg2 interface.
+ """
+
+ # Basic ACL testing with source MAC
+ pkts = self.create_stream(self.pg0, self.pg2, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
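+        # data_offset=-14 backs the match window up by the 14-byte Ethernet
+        # header, so the MAC mask is applied to the L2 fields rather than
+        # the IP header (assumption based on how the table is used here).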
+ self.create_classify_table('mac',
+ self.build_mac_mask(src_mac='ffffffffffff'), data_offset=-14)
+ self.create_classify_session(self.pg0, self.acl_tbl_idx.get('mac'),
+ self.build_mac_match(src_mac=self.pg0.remote_mac))
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('mac'))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg2.get_capture()
+ self.verify_capture(self.pg2, pkts)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('mac'), 0)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg3.assert_nothing_captured(remark="packets forwarded")
+
+ def test_acl_pbr(self):
+ """ IP PBR test
+
+ Test scenario for PBR with source IP
+ - Create IPv4 stream for pg0 -> pg3 interface.
+ - Configure PBR fib entry for packet forwarding.
+ - Send and verify received packets on pg3 interface.
+ """
+
+ # PBR testing with source IP
+ pkts = self.create_stream(self.pg0, self.pg3, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ self.create_classify_table('pbr', self.build_ip_mask(src_ip='ffffffff'))
+ pbr_option = 1
+ self.create_classify_session(self.pg0, self.acl_tbl_idx.get('pbr'),
+ self.build_ip_match(src_ip=self.pg0.remote_ip4),
+ pbr_option, self.pbr_vrfid)
+ self.config_pbr_fib_entry(self.pg3)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('pbr'))
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg3.get_capture()
+ self.verify_capture(self.pg3, pkts)
+ self.input_acl_set_interface(self.pg0, self.acl_tbl_idx.get('pbr'), 0)
+ self.pg0.assert_nothing_captured(remark="packets forwarded")
+ self.pg1.assert_nothing_captured(remark="packets forwarded")
+ self.pg2.assert_nothing_captured(remark="packets forwarded")
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_fib.py b/vpp/test/test_fib.py
new file mode 100644
index 00000000..1e28e8f8
--- /dev/null
+++ b/vpp/test/test_fib.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+
+class TestFIB(VppTestCase):
+ """ FIB Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestFIB, cls).setUpClass()
+
+ def setUp(self):
+ super(TestFIB, self).setUp()
+
+ def tearDown(self):
+ super(TestFIB, self).tearDown()
+
+ def test_fib(self):
+ """ FIB Unit Tests """
+ error = self.vapi.cli("test fib")
+
+ if error:
+ self.logger.critical(error)
+ self.assertEqual(error.find("Failed"), -1)
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_gre.py b/vpp/test/test_gre.py
new file mode 100644
index 00000000..f00e4467
--- /dev/null
+++ b/vpp/test/test_gre.py
@@ -0,0 +1,672 @@
+#!/usr/bin/env python
+
+import unittest
+from logging import *
+
+from framework import VppTestCase, VppTestRunner
+from vpp_sub_interface import VppDot1QSubint
+from vpp_gre_interface import VppGreInterface
+from vpp_ip_route import IpRoute, RoutePath
+from vpp_papi_provider import L2_VTR_OP
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q, GRE
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.volatile import RandMAC, RandIP
+
+from util import ppp, ppc
+
+
+class TestGRE(VppTestCase):
+ """ GRE Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestGRE, cls).setUpClass()
+
+ def setUp(self):
+ super(TestGRE, self).setUp()
+
+ # create 2 pg interfaces - set one in a non-default table.
+ self.create_pg_interfaces(range(2))
+
+ self.pg1.set_table_ip4(1)
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ def tearDown(self):
+ super(TestGRE, self).tearDown()
+
+ def create_stream_ip4(self, src_if, src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_4o4(self, src_if,
+ tunnel_src, tunnel_dst,
+ src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ IP(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_6o4(self, src_if,
+ tunnel_src, tunnel_dst,
+ src_ip, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ IPv6(src=src_ip, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_l2o4(self, src_if,
+ tunnel_src, tunnel_dst):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ Ether(dst=RandMAC('*:*:*:*:*:*'),
+ src=RandMAC('*:*:*:*:*:*')) /
+ IP(src=str(RandIP()), dst=str(RandIP())) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_tunnel_stream_vlano4(self, src_if,
+ tunnel_src, tunnel_dst, vlan):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=tunnel_src, dst=tunnel_dst) /
+ GRE() /
+ Ether(dst=RandMAC('*:*:*:*:*:*'),
+ src=RandMAC('*:*:*:*:*:*')) /
+ Dot1Q(vlan=vlan) /
+ IP(src=str(RandIP()), dst=str(RandIP())) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def verify_tunneled_4o4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst):
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+
+ rx_gre = rx[GRE]
+ rx_ip = rx_gre[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_l2o4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+
+ rx_gre = rx[GRE]
+ rx_l2 = rx_gre[Ether]
+ rx_ip = rx_l2[IP]
+ tx_gre = tx[GRE]
+ tx_l2 = tx_gre[Ether]
+ tx_ip = tx_l2[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # bridged, not L3 forwarded, so no TTL decrement
+ self.assertEqual(rx_ip.ttl, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_tunneled_vlano4(self, src_if, capture, sent,
+ tunnel_src, tunnel_dst, vlan):
+ try:
+ self.assertEqual(len(capture), len(sent))
+ except:
+ ppc("Unexpected packets captured:", capture)
+ raise
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ self.assertEqual(rx_ip.src, tunnel_src)
+ self.assertEqual(rx_ip.dst, tunnel_dst)
+
+ rx_gre = rx[GRE]
+ rx_l2 = rx_gre[Ether]
+ rx_vlan = rx_l2[Dot1Q]
+ rx_ip = rx_l2[IP]
+
+ self.assertEqual(rx_vlan.vlan, vlan)
+
+ tx_gre = tx[GRE]
+ tx_l2 = tx_gre[Ether]
+ tx_ip = tx_l2[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # bridged, not L3 forwarded, so no TTL decrement
+ self.assertEqual(rx_ip.ttl, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_decapped_4o4(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+ tx_gre = tx[GRE]
+ tx_ip = tx_gre[IP]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def verify_decapped_6o4(self, src_if, capture, sent):
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ try:
+ tx = sent[i]
+ rx = capture[i]
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IPv6]
+ tx_gre = tx[GRE]
+ tx_ip = tx_gre[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+
+ except:
+ self.logger.error(ppp("Rx:", rx))
+ self.logger.error(ppp("Tx:", tx))
+ raise
+
+ def test_gre(self):
+ """ GRE tunnel Tests """
+
+ #
+ # Create an L3 GRE tunnel.
+ # - set it admin up
+        # - assign an IP Address
+ # - Add a route via the tunnel
+ #
+ gre_if = VppGreInterface(self,
+ self.pg0.local_ip4,
+ "1.1.1.2")
+ gre_if.add_vpp_config()
+
+ #
+ # The double create (create the same tunnel twice) should fail,
+ # and we should still be able to use the original
+ #
+ try:
+ gre_if.add_vpp_config()
+ except Exception:
+ pass
+ else:
+ self.fail("Double GRE tunnel add does not fail")
+
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ route_via_tun = IpRoute(self, "4.4.4.4", 32,
+ [RoutePath("0.0.0.0", gre_if.sw_if_index)])
+
+ route_via_tun.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+        # - they are all dropped since the tunnel's destination IP
+        #   is unresolved, or resolves via the default route, which
+        #   is a drop.
+ #
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg0.assert_nothing_captured(
+ remark="GRE packets forwarded without DIP resolved")
+
+ #
+ # Add a route that resolves the tunnel's destination
+ #
+ route_tun_dst = IpRoute(self, "1.1.1.2", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+ route_tun_dst.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # - packets are GRE encapped
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "4.4.4.4")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_tunneled_4o4(self.pg0, rx, tx,
+ self.pg0.local_ip4, "1.1.1.2")
+
+ #
+ # Send tunneled packets that match the created tunnel and
+ # are decapped and forwarded
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_decapped_4o4(self.pg0, rx, tx)
+
+ #
+ # Send tunneled packets that do not match the tunnel's src
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg0,
+ "1.1.1.3",
+ self.pg0.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg0.assert_nothing_captured(
+ remark="GRE packets forwarded despite no SRC address match")
+
+ #
+ # Configure IPv6 on the PG interface so we can route IPv6
+ # packets
+ #
+ self.pg0.config_ip6()
+ self.pg0.resolve_ndp()
+
+ #
+        # Send IPv6 tunnel encapsulated packets
+ # - dropped since IPv6 is not enabled on the tunnel
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_6o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg0.assert_nothing_captured(remark="IPv6 GRE packets forwarded "
+ "despite IPv6 not enabled on tunnel")
+
+ #
+ # Enable IPv6 on the tunnel
+ #
+ gre_if.config_ip6()
+
+ #
+        # Send IPv6 tunnel encapsulated packets
+ # - forwarded since IPv6 is enabled on the tunnel
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_6o4(self.pg0,
+ "1.1.1.2",
+ self.pg0.local_ip4,
+ self.pg0.local_ip6,
+ self.pg0.remote_ip6)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_decapped_6o4(self.pg0, rx, tx)
+
+ #
+ # test case cleanup
+ #
+ route_tun_dst.remove_vpp_config()
+ route_via_tun.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ self.pg0.unconfig_ip6()
+
+ def test_gre_vrf(self):
+ """ GRE tunnel VRF Tests """
+
+ #
+ # Create an L3 GRE tunnel whose destination is in the non-default
+ # table. The underlay is thus non-default - the overlay is still
+ # the default.
+ # - set it admin up
+        # - assign an IP Address
+ #
+ gre_if = VppGreInterface(self, self.pg1.local_ip4,
+ "2.2.2.2",
+ outer_fib_id=1)
+ gre_if.add_vpp_config()
+ gre_if.admin_up()
+ gre_if.config_ip4()
+
+ #
+ # Add a route via the tunnel - in the overlay
+ #
+ route_via_tun = IpRoute(self, "9.9.9.9", 32,
+ [RoutePath("0.0.0.0", gre_if.sw_if_index)])
+ route_via_tun.add_vpp_config()
+
+ #
+ # Add a route that resolves the tunnel's destination - in the
+ # underlay table
+ #
+ route_tun_dst = IpRoute(self, "2.2.2.2", 32, table_id=1,
+ paths=[RoutePath(self.pg1.remote_ip4,
+ self.pg1.sw_if_index)])
+ route_tun_dst.add_vpp_config()
+
+ #
+ # Send a packet stream that is routed into the tunnel
+ # packets are sent in on pg0 which is in the default table
+ # - packets are GRE encapped
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "5.5.5.5", "9.9.9.9")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture()
+ self.verify_tunneled_4o4(self.pg1, rx, tx,
+ self.pg1.local_ip4, "2.2.2.2")
+
+ #
+ # Send tunneled packets that match the created tunnel and
+        # are decapped and forwarded. This tests that the decap lookup
+        # does not happen in the encap table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_4o4(self.pg1,
+ "2.2.2.2",
+ self.pg1.local_ip4,
+ self.pg0.local_ip4,
+ self.pg0.remote_ip4)
+ self.pg1.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_decapped_4o4(self.pg0, rx, tx)
+
+ #
+ # test case cleanup
+ #
+ route_tun_dst.remove_vpp_config()
+ route_via_tun.remove_vpp_config()
+ gre_if.remove_vpp_config()
+
+ def test_gre_l2(self):
+ """ GRE tunnel L2 Tests """
+
+ #
+ # Add routes to resolve the tunnel destinations
+ #
+ route_tun1_dst = IpRoute(self, "2.2.2.2", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+ route_tun2_dst = IpRoute(self, "2.2.2.3", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index)])
+
+ route_tun1_dst.add_vpp_config()
+ route_tun2_dst.add_vpp_config()
+
+ #
+ # Create 2 L2 GRE tunnels and x-connect them
+ #
+ gre_if1 = VppGreInterface(self, self.pg0.local_ip4,
+ "2.2.2.2",
+ is_teb=1)
+ gre_if2 = VppGreInterface(self, self.pg0.local_ip4,
+ "2.2.2.3",
+ is_teb=1)
+ gre_if1.add_vpp_config()
+ gre_if2.add_vpp_config()
+
+ gre_if1.admin_up()
+ gre_if2.admin_up()
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
+ gre_if2.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
+ gre_if1.sw_if_index,
+ enable=1)
+
+ #
+ # Send in tunnel encapped L2. expect out tunnel encapped L2
+ # in both directions
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_l2o4(self.pg0,
+ "2.2.2.2",
+ self.pg0.local_ip4)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_tunneled_l2o4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.3")
+
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_l2o4(self.pg0,
+ "2.2.2.3",
+ self.pg0.local_ip4)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_tunneled_l2o4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.2")
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if1.sw_if_index,
+ gre_if2.sw_if_index,
+ enable=0)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if2.sw_if_index,
+ gre_if1.sw_if_index,
+ enable=0)
+
+ #
+        # Create VLAN sub-interfaces on the GRE TEB interfaces
+ # then x-connect them
+ #
+ gre_if_11 = VppDot1QSubint(self, gre_if1, 11)
+ gre_if_12 = VppDot1QSubint(self, gre_if2, 12)
+
+ # gre_if_11.add_vpp_config()
+ # gre_if_12.add_vpp_config()
+
+ gre_if_11.admin_up()
+ gre_if_12.admin_up()
+
+ self.vapi.sw_interface_set_l2_xconnect(gre_if_11.sw_if_index,
+ gre_if_12.sw_if_index,
+ enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(gre_if_12.sw_if_index,
+ gre_if_11.sw_if_index,
+ enable=1)
+
+ #
+        # Configure both to pop their respective VLAN tags,
+        # so that during the x-connect they will subsequently push
+ #
+ self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_12.sw_if_index,
+ L2_VTR_OP.L2_POP_1,
+ 12)
+ self.vapi.sw_interface_set_l2_tag_rewrite(gre_if_11.sw_if_index,
+ L2_VTR_OP.L2_POP_1,
+ 11)
+
+ #
+        # Send traffic in both directions - expect the VLAN tags to
+ # be swapped.
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_vlano4(self.pg0,
+ "2.2.2.2",
+ self.pg0.local_ip4,
+ 11)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_tunneled_vlano4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.3",
+ 12)
+
+ self.vapi.cli("clear trace")
+ tx = self.create_tunnel_stream_vlano4(self.pg0,
+ "2.2.2.3",
+ self.pg0.local_ip4,
+ 12)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_tunneled_vlano4(self.pg0, rx, tx,
+ self.pg0.local_ip4,
+ "2.2.2.2",
+ 11)
+
+ #
+ # Cleanup Test resources
+ #
+ gre_if_11.remove_vpp_config()
+ gre_if_12.remove_vpp_config()
+ gre_if1.remove_vpp_config()
+ gre_if2.remove_vpp_config()
+        route_tun1_dst.remove_vpp_config()
+        route_tun2_dst.remove_vpp_config()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_ip4.py b/vpp/test/test_ip4.py
new file mode 100644
index 00000000..f67c3b9c
--- /dev/null
+++ b/vpp/test/test_ip4.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python
+import random
+import socket
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet import IP, UDP
+from util import ppp
+
+
+class TestIPv4(VppTestCase):
+ """ IPv4 Test Case """
+
+ def setUp(self):
+ """
+ Perform test setup before test case.
+
+ **Config:**
+ - create 3 pg interfaces
+ - untagged pg0 interface
+ - Dot1Q subinterface on pg1
+ - Dot1AD subinterface on pg2
+ - setup interfaces:
+ - put it into UP state
+ - set IPv4 addresses
+ - resolve neighbor address using ARP
+ - configure 200 fib entries
+
+ :ivar list interfaces: pg interfaces and subinterfaces.
+ :ivar dict flows: IPv4 packet flows in test.
+ :ivar list pg_if_packet_sizes: packet sizes in test.
+ """
+ super(TestIPv4, self).setUp()
+
+ # create 3 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ # create 2 subinterfaces for pg1 and pg2
+ self.sub_interfaces = [
+ VppDot1QSubint(self, self.pg1, 100),
+ VppDot1ADSubint(self, self.pg2, 200, 300, 400)]
+
+ # packet flows mapping pg0 -> pg1.sub, pg2.sub, etc.
+ self.flows = dict()
+ self.flows[self.pg0] = [self.pg1.sub_if, self.pg2.sub_if]
+ self.flows[self.pg1.sub_if] = [self.pg0, self.pg2.sub_if]
+ self.flows[self.pg2.sub_if] = [self.pg0, self.pg1.sub_if]
+
+ # packet sizes
+ self.pg_if_packet_sizes = [64, 512, 1518, 9018]
+ self.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
+
+ self.interfaces = list(self.pg_interfaces)
+ self.interfaces.extend(self.sub_interfaces)
+
+ # setup all interfaces
+ for i in self.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+        # config 200 FIB entries
+ self.config_fib_entries(200)
+
+ def tearDown(self):
+ """Run standard test teardown and log ``show ip arp``."""
+ super(TestIPv4, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show ip arp"))
+ # info(self.vapi.cli("show ip fib")) # many entries
+
+ def config_fib_entries(self, count):
+ """For each interface add to the FIB table *count* routes to
+ "10.0.0.1/32" destination with interface's local address as next-hop
+ address.
+
+ :param int count: Number of FIB entries.
+
+ - *TODO:* check if the next-hop address shouldn't be remote address
+ instead of local address.
+ """
+ n_int = len(self.interfaces)
+ percent = 0
+ counter = 0.0
+ dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.1")
+ dest_addr_len = 32
+ for i in self.interfaces:
+ next_hop_address = i.local_ip4n
+ for j in range(count / n_int):
+ self.vapi.ip_add_del_route(
+ dest_addr, dest_addr_len, next_hop_address)
+ counter += 1
+ if counter / count * 100 > percent:
+ self.logger.info("Configure %d FIB entries .. %d%% done" %
+ (count, percent))
+ percent += 1
+
+ def create_stream(self, src_if, packet_sizes):
+ """Create input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for.
+ :param list packet_sizes: Required packet sizes.
+ """
+ pkts = []
+ for i in range(0, 257):
+ dst_if = self.flows[src_if][i % 2]
+ info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ if isinstance(src_if, VppSubInterface):
+ p = src_if.add_dot1_layer(p)
+ size = packet_sizes[(i // 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, dst_if, capture):
+ """Verify captured input packet stream for defined interface.
+
+ :param VppInterface dst_if: Interface to verify captured packet stream
+ for.
+ :param list capture: Captured packet stream.
+ """
+ self.logger.info("Verifying capture on interface %s" % dst_if.name)
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+ is_sub_if = False
+ dst_sw_if_index = dst_if.sw_if_index
+ if hasattr(dst_if, 'parent'):
+ is_sub_if = True
+ for packet in capture:
+ if is_sub_if:
+ # Check VLAN tags and Ethernet header
+ packet = dst_if.remove_dot1_layer(packet)
+ self.assertTrue(Dot1Q not in packet)
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+ def test_fib(self):
+ """ IPv4 FIB test
+
+ Test scenario:
+
+ - Create IPv4 stream for pg0 interface
+ - Create IPv4 tagged streams for pg1's and pg2's subinterface.
+ - Send and verify received packets on each interface.
+ """
+
+ pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ for i in self.sub_interfaces:
+ pkts = self.create_stream(i, self.sub_if_packet_sizes)
+ i.parent.add_stream(pkts)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture()
+ self.verify_capture(self.pg0, pkts)
+
+ for i in self.sub_interfaces:
+ pkts = i.parent.get_capture()
+ self.verify_capture(i, pkts)
+
+
+class TestIPv4FibCrud(VppTestCase):
+ """ FIB - add/update/delete - ip4 routes
+
+ Test scenario:
+    - add 100,
+    - del 10,
+    - re-add 5 and add 100 new,
+    - del 200
+
+    .. note:: The Python API is too slow to add many routes; it needs a
+       C code replacement.
+ """
+
+ def config_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
+ """
+
+ :param start_dest_addr:
+ :param next_hop_addr:
+ :param count:
+ :return list: added ips with 32 prefix
+ """
+ added_ips = []
+ dest_addr = int(
+ socket.inet_pton(socket.AF_INET, start_dest_addr).encode('hex'), 16)
+ dest_addr_len = 32
+ n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
+ for _ in range(count):
+ n_dest_addr = '{:08x}'.format(dest_addr).decode('hex')
+ self.vapi.ip_add_del_route(n_dest_addr, dest_addr_len,
+ n_next_hop_addr)
+ added_ips.append(socket.inet_ntoa(n_dest_addr))
+ dest_addr += 1
+ return added_ips
+
+ def unconfig_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
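+        """Remove *count* consecutive /32 routes starting at
+        *start_dest_addr*.
+
+        :param start_dest_addr: First destination address to remove.
+        :param next_hop_addr: Next-hop address used by the removed routes.
+        :param count: Number of routes to remove.
+        :return list: removed ips with 32 prefix
+        """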
+
+ removed_ips = []
+ dest_addr = int(
+ socket.inet_pton(socket.AF_INET, start_dest_addr).encode('hex'), 16)
+ dest_addr_len = 32
+ n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
+ for _ in range(count):
+ n_dest_addr = '{:08x}'.format(dest_addr).decode('hex')
+ self.vapi.ip_add_del_route(n_dest_addr, dest_addr_len,
+ n_next_hop_addr, is_add=0)
+ removed_ips.append(socket.inet_ntoa(n_dest_addr))
+ dest_addr += 1
+ return removed_ips
+
+ def create_stream(self, src_if, dst_if, dst_ips, count):
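+        """Create *count* UDP packets from *src_if* to destination addresses
+        randomly chosen from *dst_ips*.
+
+        :param VppInterface src_if: Ingress interface.
+        :param VppInterface dst_if: Expected egress interface.
+        :param list dst_ips: Destination IPv4 addresses to choose from.
+        :param int count: Number of packets to create.
+        """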
+ pkts = []
+
+ for _ in range(count):
+ dst_addr = random.choice(dst_ips)
+ info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=dst_addr) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+            size = random.choice(self.pg_if_packet_sizes)
+            self.extend_packet(p, size)
+ pkts.append(p)
+
+ return pkts
+
+ def _find_ip_match(self, find_in, pkt):
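+        """Return the first packet in *find_in* whose payload info and
+        IP/UDP fields match *pkt*, or None if no such packet is found."""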
+ for p in find_in:
+            if self.payload_to_info(str(p[Raw])) == \
+                    self.payload_to_info(str(pkt[Raw])):
+ if p[IP].src != pkt[IP].src:
+ break
+ if p[IP].dst != pkt[IP].dst:
+ break
+ if p[UDP].sport != pkt[UDP].sport:
+ break
+ if p[UDP].dport != pkt[UDP].dport:
+ break
+ return p
+ return None
+
+ @staticmethod
+ def _match_route_detail(route_detail, ip, address_length=32, table_id=0):
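+        """Return True if *route_detail* describes *ip* with the given
+        prefix length and table id."""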
+ if route_detail.address == socket.inet_pton(socket.AF_INET, ip):
+ if route_detail.table_id != table_id:
+ return False
+ elif route_detail.address_length != address_length:
+ return False
+ else:
+ return True
+ else:
+ return False
+
+ def verify_capture(self, dst_interface, received_pkts, expected_pkts):
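+        """Check that the packets received on *dst_interface* match the
+        expected packets one-to-one (MAC addresses and IP/UDP fields)."""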
+ self.assertEqual(len(received_pkts), len(expected_pkts))
+ to_verify = list(expected_pkts)
+ for p in received_pkts:
+ self.assertEqual(p.src, dst_interface.local_mac)
+ self.assertEqual(p.dst, dst_interface.remote_mac)
+ x = self._find_ip_match(to_verify, p)
+ to_verify.remove(x)
+ self.assertListEqual(to_verify, [])
+
+ def verify_route_dump(self, fib_dump, ips):
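+        """Assert that every IP address in *ips* is present in *fib_dump*."""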
+
+ def _ip_in_route_dump(ip, fib_dump):
+ return next((route for route in fib_dump
+ if self._match_route_detail(route, ip)),
+ False)
+
+ for ip in ips:
+ self.assertTrue(_ip_in_route_dump(ip, fib_dump),
+ 'IP {} is not in fib dump.'.format(ip))
+
+ def verify_not_in_route_dump(self, fib_dump, ips):
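+        """Assert that no IP address in *ips* is present in *fib_dump*."""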
+
+ def _ip_in_route_dump(ip, fib_dump):
+ return next((route for route in fib_dump
+ if self._match_route_detail(route, ip)),
+ False)
+
+ for ip in ips:
+ self.assertFalse(_ip_in_route_dump(ip, fib_dump),
+ 'IP {} is in fib dump.'.format(ip))
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ #. Create and initialize 3 pg interfaces.
+        #. Initialize class attributes configured_routes and deleted_routes
+           to store information between tests.
+ """
+ super(TestIPv4FibCrud, cls).setUpClass()
+
+ try:
+ # create 3 pg interfaces
+ cls.create_pg_interfaces(range(3))
+
+ cls.interfaces = list(cls.pg_interfaces)
+
+ # setup all interfaces
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.configured_routes = []
+ cls.deleted_routes = []
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+
+ except Exception:
+ super(TestIPv4FibCrud, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ super(TestIPv4FibCrud, self).setUp()
+ self.packet_infos = {}
+
+ def test_1_add_routes(self):
+ """ Add 1k routes
+
+ - add 100 routes check with traffic script.
+ """
+        # config 100 FIB entries
+ self.configured_routes.extend(self.config_fib_many_to_one(
+ "10.0.0.0", self.pg0.remote_ip4, 100))
+
+ fib_dump = self.vapi.ip_fib_dump()
+ self.verify_route_dump(fib_dump, self.configured_routes)
+
+ self.stream_1 = self.create_stream(
+ self.pg1, self.pg0, self.configured_routes, 100)
+ self.stream_2 = self.create_stream(
+ self.pg2, self.pg0, self.configured_routes, 100)
+ self.pg1.add_stream(self.stream_1)
+ self.pg2.add_stream(self.stream_2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture()
+ self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
+
+ def test_2_del_routes(self):
+ """ Delete 100 routes
+
+ - delete 10 routes check with traffic script.
+ """
+ self.deleted_routes.extend(self.unconfig_fib_many_to_one(
+ "10.0.0.10", self.pg0.remote_ip4, 10))
+ for x in self.deleted_routes:
+ self.configured_routes.remove(x)
+
+ fib_dump = self.vapi.ip_fib_dump()
+ self.verify_route_dump(fib_dump, self.configured_routes)
+
+ self.stream_1 = self.create_stream(
+ self.pg1, self.pg0, self.configured_routes, 100)
+ self.stream_2 = self.create_stream(
+ self.pg2, self.pg0, self.configured_routes, 100)
+ self.stream_3 = self.create_stream(
+ self.pg1, self.pg0, self.deleted_routes, 100)
+ self.stream_4 = self.create_stream(
+ self.pg2, self.pg0, self.deleted_routes, 100)
+ self.pg1.add_stream(self.stream_1 + self.stream_3)
+ self.pg2.add_stream(self.stream_2 + self.stream_4)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture()
+ self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
+
+ def test_3_add_new_routes(self):
+ """ Add 1k routes
+
+ - re-add 5 routes check with traffic script.
+ - add 100 routes check with traffic script.
+ """
+ tmp = self.config_fib_many_to_one(
+ "10.0.0.10", self.pg0.remote_ip4, 5)
+ self.configured_routes.extend(tmp)
+ for x in tmp:
+ self.deleted_routes.remove(x)
+
+ self.configured_routes.extend(self.config_fib_many_to_one(
+ "10.0.1.0", self.pg0.remote_ip4, 100))
+
+ fib_dump = self.vapi.ip_fib_dump()
+ self.verify_route_dump(fib_dump, self.configured_routes)
+
+ self.stream_1 = self.create_stream(
+ self.pg1, self.pg0, self.configured_routes, 300)
+ self.stream_2 = self.create_stream(
+ self.pg2, self.pg0, self.configured_routes, 300)
+ self.stream_3 = self.create_stream(
+ self.pg1, self.pg0, self.deleted_routes, 100)
+ self.stream_4 = self.create_stream(
+ self.pg2, self.pg0, self.deleted_routes, 100)
+
+ self.pg1.add_stream(self.stream_1 + self.stream_3)
+ self.pg2.add_stream(self.stream_2 + self.stream_4)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture()
+ self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
+
+ def test_4_del_routes(self):
+ """ Delete 1.5k routes
+
+ - delete 5 routes check with traffic script.
+ - add 100 routes check with traffic script.
+ """
+ self.deleted_routes.extend(self.unconfig_fib_many_to_one(
+ "10.0.0.0", self.pg0.remote_ip4, 15))
+ self.deleted_routes.extend(self.unconfig_fib_many_to_one(
+ "10.0.0.20", self.pg0.remote_ip4, 85))
+ self.deleted_routes.extend(self.unconfig_fib_many_to_one(
+ "10.0.1.0", self.pg0.remote_ip4, 100))
+ fib_dump = self.vapi.ip_fib_dump()
+ self.verify_not_in_route_dump(fib_dump, self.deleted_routes)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_ip4_irb.py b/vpp/test/test_ip4_irb.py
new file mode 100644
index 00000000..cf2bb150
--- /dev/null
+++ b/vpp/test/test_ip4_irb.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+"""IRB Test Case HLD:
+
+**config**
+ - L2 MAC learning enabled in l2bd
+ - 2 routed interfaces untagged, bvi (Bridge Virtual Interface)
+ - 2 bridged interfaces in l2bd with bvi
+
+**test**
+ - sending ip4 eth pkts between routed interfaces
+ - 2 routed interfaces
+ - 2 bridged interfaces
+
+ - 64B, 512B, 1518B, 9200B (ether_size)
+
+ - burst of pkts per interface
+ - 257pkts per burst
+ - routed pkts hitting different FIB entries
+ - bridged pkts hitting different MAC entries
+
+**verify**
+ - all packets received correctly
+
+"""
+
+import unittest
+from random import choice
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+
+
+class TestIpIrb(VppTestCase):
+ """IRB Test Case"""
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ #. Create BD with MAC learning enabled and put interfaces to this BD.
+ #. Configure IPv4 addresses on loopback interface and routed interface.
+ #. Configure MAC address binding to IPv4 neighbors on loop0.
+ #. Configure MAC address on pg2.
+        #. The loopback BVI interface has remote hosts; one half of the
+           hosts is behind pg0, the other half behind pg1.
+ """
+ super(TestIpIrb, cls).setUpClass()
+
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018] # packet sizes
+ cls.bd_id = 10
+ cls.remote_hosts_count = 250
+
+ # create 3 pg interfaces, 1 loopback interface
+ cls.create_pg_interfaces(range(3))
+ cls.create_loopback_interfaces(range(1))
+
+ cls.interfaces = list(cls.pg_interfaces)
+ cls.interfaces.extend(cls.lo_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+
+ # Create BD with MAC learning enabled and put interfaces to this BD
+ cls.vapi.sw_interface_set_l2_bridge(
+ cls.loop0.sw_if_index, bd_id=cls.bd_id, bvi=1)
+ cls.vapi.sw_interface_set_l2_bridge(
+ cls.pg0.sw_if_index, bd_id=cls.bd_id)
+ cls.vapi.sw_interface_set_l2_bridge(
+ cls.pg1.sw_if_index, bd_id=cls.bd_id)
+
+ # Configure IPv4 addresses on loopback interface and routed interface
+ cls.loop0.config_ip4()
+ cls.pg2.config_ip4()
+
+ # Configure MAC address binding to IPv4 neighbors on loop0
+ cls.loop0.generate_remote_hosts(cls.remote_hosts_count)
+ cls.loop0.configure_ipv4_neighbors()
+ # configure MAC address on pg2
+ cls.pg2.resolve_arp()
+
+        # The loopback BVI interface has remote hosts; one half of the hosts
+        # is behind pg0, the other half behind pg1
+ half = cls.remote_hosts_count // 2
+ cls.pg0.remote_hosts = cls.loop0.remote_hosts[:half]
+ cls.pg1.remote_hosts = cls.loop0.remote_hosts[half:]
+
+ def tearDown(self):
+ """Run standard test teardown and log ``show l2patch``,
+ ``show l2fib verbose``,``show bridge-domain <bd_id> detail``,
+ ``show ip arp``.
+ """
+ super(TestIpIrb, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show l2patch"))
+ self.logger.info(self.vapi.cli("show l2fib verbose"))
+ self.logger.info(self.vapi.cli("show bridge-domain %s detail" %
+ self.bd_id))
+ self.logger.info(self.vapi.cli("show ip arp"))
+
+ def create_stream(self, src_ip_if, dst_ip_if, packet_sizes):
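+        """Create a stream of UDP packets entering the routed interface
+        *src_ip_if*, destined to randomly chosen remote hosts of the BVI
+        interface *dst_ip_if*."""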
+ pkts = []
+ for i in range(0, 257):
+ remote_dst_host = choice(dst_ip_if.remote_hosts)
+ info = self.create_packet_info(
+ src_ip_if.sw_if_index, dst_ip_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_ip_if.local_mac, src=src_ip_if.remote_mac) /
+ IP(src=src_ip_if.remote_ip4,
+ dst=remote_dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ size = packet_sizes[(i // 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def create_stream_l2_to_ip(self, src_l2_if, src_ip_if, dst_ip_if,
+ packet_sizes):
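+        """Create a stream of UDP packets sent by hosts behind the bridged
+        interface *src_l2_if* to the MAC of *src_ip_if* (the BVI), destined
+        to the remote host of the routed interface *dst_ip_if*."""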
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(
+ src_ip_if.sw_if_index, dst_ip_if.sw_if_index)
+ payload = self.info_to_payload(info)
+
+ host = choice(src_l2_if.remote_hosts)
+
+ p = (Ether(src=host.mac,
+                       dst=src_ip_if.local_mac) /
+ IP(src=host.ip4,
+ dst=dst_ip_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+
+ info.data = p.copy()
+ size = packet_sizes[(i // 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+
+ pkts.append(p)
+ return pkts
+
+ def verify_capture_l2_to_ip(self, dst_ip_if, src_ip_if, capture):
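+        """Verify the bridged-to-routed capture: packets must be MAC
+        rewritten to *dst_ip_if* addresses, sourced from a host of
+        *src_ip_if* and match the IP/UDP fields of the saved packets."""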
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+
+ dst_ip_sw_if_index = dst_ip_if.sw_if_index
+
+ for packet in capture:
+ ip = packet[IP]
+ udp = packet[IP][UDP]
+ payload_info = self.payload_to_info(str(packet[IP][UDP][Raw]))
+ packet_index = payload_info.index
+
+ self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
+
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_ip_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ saved_packet = next_info.data
+ self.assertTrue(next_info is not None)
+
+ # MAC: src, dst
+ self.assertEqual(packet.src, dst_ip_if.local_mac)
+ self.assertEqual(packet.dst, dst_ip_if.remote_mac)
+
+ # IP: src, dst
+ host = src_ip_if.host_by_ip4(ip.src)
+ self.assertIsNotNone(host)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(ip.dst, dst_ip_if.remote_ip4)
+
+ # UDP:
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+
+ def verify_capture(self, dst_ip_if, src_ip_if, capture):
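+        """Verify the routed-to-bridged capture: packets must carry the
+        local MAC of *dst_ip_if* (the BVI) as source, be destined to one of
+        its remote hosts and match the IP/UDP fields of the saved packets."""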
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+
+ dst_ip_sw_if_index = dst_ip_if.sw_if_index
+
+ for packet in capture:
+ ip = packet[IP]
+ udp = packet[IP][UDP]
+ payload_info = self.payload_to_info(str(packet[IP][UDP][Raw]))
+ packet_index = payload_info.index
+
+ self.assertEqual(payload_info.dst, dst_ip_sw_if_index)
+
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_ip_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ self.assertTrue(next_info is not None)
+
+ # MAC: src, dst
+ self.assertEqual(packet.src, dst_ip_if.local_mac)
+ host = dst_ip_if.host_by_mac(packet.dst)
+
+ # IP: src, dst
+ self.assertEqual(ip.src, src_ip_if.remote_ip4)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(ip.dst, host.ip4)
+
+ # UDP:
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+
+ def test_ip4_irb_1(self):
+ """ IPv4 IRB test 1
+
+ Test scenario:
+        - IP traffic from the pg2 interface must end up on both pg0 and pg1
+        - an ARP entry is present on the loop0 interface for the destination
+          IP
+        - no L2 FIB entry is configured, so pg0 and pg1 receive the same
+          flooded traffic
+ """
+
+ stream = self.create_stream(
+ self.pg2, self.loop0, self.pg_if_packet_sizes)
+ self.pg2.add_stream(stream)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rcvd1 = self.pg0.get_capture()
+ rcvd2 = self.pg1.get_capture()
+
+ self.verify_capture(self.loop0, self.pg2, rcvd1)
+ self.verify_capture(self.loop0, self.pg2, rcvd2)
+
+ self.assertListEqual(rcvd1.res, rcvd2.res)
+
+ def test_ip4_irb_2(self):
+ """ IPv4 IRB test 2
+
+ Test scenario:
+        - IP traffic from pg0 and pg1 ends up on pg2
+ """
+
+ stream1 = self.create_stream_l2_to_ip(
+ self.pg0, self.loop0, self.pg2, self.pg_if_packet_sizes)
+ stream2 = self.create_stream_l2_to_ip(
+ self.pg1, self.loop0, self.pg2, self.pg_if_packet_sizes)
+ self.pg0.add_stream(stream1)
+ self.pg1.add_stream(stream2)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rcvd = self.pg2.get_capture()
+ self.verify_capture_l2_to_ip(self.pg2, self.loop0, rcvd)
+
+ self.assertEqual(len(stream1) + len(stream2), len(rcvd.res))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_ip6.py b/vpp/test/test_ip6.py
new file mode 100644
index 00000000..06b15f94
--- /dev/null
+++ b/vpp/test/test_ip6.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python
+
+import unittest
+import socket
+
+from framework import VppTestCase, VppTestRunner
+from vpp_sub_interface import VppSubInterface, VppDot1QSubint
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet6 import IPv6, UDP
+from util import ppp
+
+
+class TestIPv6(VppTestCase):
+ """ IPv6 Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestIPv6, cls).setUpClass()
+
+ def setUp(self):
+ """
+ Perform test setup before test case.
+
+ **Config:**
+ - create 3 pg interfaces
+ - untagged pg0 interface
+ - Dot1Q subinterface on pg1
+            - Dot1Q subinterface on pg2 (Dot1AD planned, see TODO below)
+        - setup interfaces:
+            - put them into UP state
+ - set IPv6 addresses
+ - resolve neighbor address using NDP
+ - configure 200 fib entries
+
+ :ivar list interfaces: pg interfaces and subinterfaces.
+        :ivar dict flows: IPv6 packet flows in test.
+ :ivar list pg_if_packet_sizes: packet sizes in test.
+
+ *TODO:* Create AD sub interface
+ """
+ super(TestIPv6, self).setUp()
+
+ # create 3 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+        # create 2 subinterfaces for pg1 and pg2
+ self.sub_interfaces = [
+ VppDot1QSubint(self, self.pg1, 100),
+ VppDot1QSubint(self, self.pg2, 200)
+ # TODO: VppDot1ADSubint(self, self.pg2, 200, 300, 400)
+ ]
+
+ # packet flows mapping pg0 -> pg1.sub, pg2.sub, etc.
+ self.flows = dict()
+ self.flows[self.pg0] = [self.pg1.sub_if, self.pg2.sub_if]
+ self.flows[self.pg1.sub_if] = [self.pg0, self.pg2.sub_if]
+ self.flows[self.pg2.sub_if] = [self.pg0, self.pg1.sub_if]
+
+ # packet sizes
+ self.pg_if_packet_sizes = [64, 512, 1518, 9018]
+ self.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
+
+ self.interfaces = list(self.pg_interfaces)
+ self.interfaces.extend(self.sub_interfaces)
+
+ # setup all interfaces
+ for i in self.interfaces:
+ i.admin_up()
+ i.config_ip6()
+ i.resolve_ndp()
+
+        # config 200 FIB entries
+ self.config_fib_entries(200)
+
+ def tearDown(self):
+ """Run standard test teardown and log ``show ip6 neighbors``."""
+ super(TestIPv6, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show ip6 neighbors"))
+ # info(self.vapi.cli("show ip6 fib")) # many entries
+
+ def config_fib_entries(self, count):
+ """For each interface add to the FIB table *count* routes to
+ "fd02::1/128" destination with interface's local address as next-hop
+ address.
+
+ :param int count: Number of FIB entries.
+
+ - *TODO:* check if the next-hop address shouldn't be remote address
+ instead of local address.
+ """
+ n_int = len(self.interfaces)
+ percent = 0
+ counter = 0.0
+ dest_addr = socket.inet_pton(socket.AF_INET6, "fd02::1")
+ dest_addr_len = 128
+ for i in self.interfaces:
+ next_hop_address = i.local_ip6n
+ for j in range(count / n_int):
+ self.vapi.ip_add_del_route(
+ dest_addr, dest_addr_len, next_hop_address, is_ipv6=1)
+ counter += 1
+ if counter / count * 100 > percent:
+ self.logger.info("Configure %d FIB entries .. %d%% done" %
+ (count, percent))
+ percent += 1
+
+ def create_stream(self, src_if, packet_sizes):
+ """Create input packet stream for defined interface.
+
+ :param VppInterface src_if: Interface to create packet stream for.
+ :param list packet_sizes: Required packet sizes.
+ """
+ pkts = []
+ for i in range(0, 257):
+ dst_if = self.flows[src_if][i % 2]
+ info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IPv6(src=src_if.remote_ip6, dst=dst_if.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ if isinstance(src_if, VppSubInterface):
+ p = src_if.add_dot1_layer(p)
+ size = packet_sizes[(i // 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, dst_if, capture):
+ """Verify captured input packet stream for defined interface.
+
+ :param VppInterface dst_if: Interface to verify captured packet stream
+ for.
+ :param list capture: Captured packet stream.
+ """
+ self.logger.info("Verifying capture on interface %s" % dst_if.name)
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+ is_sub_if = False
+ dst_sw_if_index = dst_if.sw_if_index
+ if hasattr(dst_if, 'parent'):
+ is_sub_if = True
+ for packet in capture:
+ if is_sub_if:
+ # Check VLAN tags and Ethernet header
+ packet = dst_if.remove_dot1_layer(packet)
+ self.assertTrue(Dot1Q not in packet)
+ try:
+ ip = packet[IPv6]
+ udp = packet[UDP]
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IPv6].src)
+ self.assertEqual(ip.dst, saved_packet[IPv6].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Interface %s: Packet expected from interface %s "
+ "didn't arrive" % (dst_if.name, i.name))
+
+ def test_fib(self):
+ """ IPv6 FIB test
+
+ Test scenario:
+ - Create IPv6 stream for pg0 interface
+ - Create IPv6 tagged streams for pg1's and pg2's subinterface.
+ - Send and verify received packets on each interface.
+ """
+
+ pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ for i in self.sub_interfaces:
+ pkts = self.create_stream(i, self.sub_if_packet_sizes)
+ i.parent.add_stream(pkts)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ pkts = self.pg0.get_capture()
+ self.verify_capture(self.pg0, pkts)
+
+ for i in self.sub_interfaces:
+ pkts = i.parent.get_capture()
+ self.verify_capture(i, pkts)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_l2_fib.py b/vpp/test/test_l2_fib.py
new file mode 100644
index 00000000..4855a3ea
--- /dev/null
+++ b/vpp/test/test_l2_fib.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python
+"""L2 FIB Test Case HLD:
+
+**config 1**
+ - add 4 pg-l2 interfaces
+ - configure them into l2bd
+ - L2 MAC learning and unknown unicast flooding disabled in l2bd
+ - configure 100 MAC entries in L2 fib - 25 MACs per interface
+
+**test 1**
+ - send L2 MAC frames between all 4 pg-l2 interfaces for all of 100 MAC \
+ entries in the FIB
+
+**verify 1**
+ - all packets received correctly
+
+**config 2**
+ - delete 12 MAC entries - 3 MACs per interface
+
+**test 2a**
+ - send L2 MAC frames between all 4 pg-l2 interfaces for non-deleted MAC \
+ entries
+
+**verify 2a**
+ - all packets received correctly
+
+**test 2b**
+ - send L2 MAC frames between all 4 pg-l2 interfaces for all of 12 deleted \
+ MAC entries
+
+**verify 2b**
+ - no packets received on any of the 4 pg-l2 interfaces
+
+**config 3**
+ - configure new 100 MAC entries in L2 fib - 25 MACs per interface
+
+**test 3**
+ - send L2 MAC frames between all 4 pg-l2 interfaces for all of 188 MAC \
+ entries in the FIB
+
+**verify 3**
+ - all packets received correctly
+
+**config 4**
+ - delete 160 MAC entries, 40 MACs per interface
+
+**test 4a**
+ - send L2 MAC frames between all 4 pg-l2 interfaces for all of 28 \
+ non-deleted MAC entries
+
+**verify 4a**
+ - all packets received correctly
+
+**test 4b**
+ - try send L2 MAC frames between all 4 pg-l2 interfaces for all of 172 \
+ deleted MAC entries
+
+**verify 4b**
+ - no packets received on any of the 4 pg-l2 interfaces
+"""
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+
+
+class TestL2fib(VppTestCase):
+ """ L2 FIB Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+
+ :var int bd_id: Bridge domain ID.
+ :var int mac_entries_count: Number of MAC entries for bridge-domain.
+ """
+ super(TestL2fib, cls).setUpClass()
+
+ # Test variables
+ cls.bd_id = 1
+ cls.mac_entries_count = 200
+
+ try:
+ # Create 4 pg interfaces
+ cls.create_pg_interfaces(range(4))
+
+ # Packet flows mapping pg0 -> pg1, pg2, pg3 etc.
+ cls.flows = dict()
+ cls.flows[cls.pg0] = [cls.pg1, cls.pg2, cls.pg3]
+ cls.flows[cls.pg1] = [cls.pg0, cls.pg2, cls.pg3]
+ cls.flows[cls.pg2] = [cls.pg0, cls.pg1, cls.pg3]
+ cls.flows[cls.pg3] = [cls.pg0, cls.pg1, cls.pg2]
+
+ # Packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+
+ # Create BD with MAC learning and unknown unicast flooding disabled
+ # and put interfaces to this BD
+ cls.vapi.bridge_domain_add_del(bd_id=cls.bd_id, uu_flood=0, learn=0)
+ for pg_if in cls.pg_interfaces:
+ cls.vapi.sw_interface_set_l2_bridge(pg_if.sw_if_index,
+ bd_id=cls.bd_id)
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ # Mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # Create list of deleted hosts
+ cls.deleted_hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.deleted_hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ except Exception:
+ super(TestL2fib, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ """
+ Clear trace and packet infos before running each test.
+ """
+ super(TestL2fib, self).setUp()
+ self.packet_infos = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestL2fib, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.ppcli("show l2fib verbose"))
+ self.logger.info(self.vapi.ppcli("show bridge-domain %s detail"
+ % self.bd_id))
+
+ def create_hosts(self, count, start=0):
+ """
+ Create required number of host MAC addresses and distribute them among
+ interfaces. Create host IPv4 address for every host MAC address.
+
+ :param int count: Number of hosts to create MAC/IPv4 addresses for.
+ :param int start: Number to start numbering from.
+ """
+ n_int = len(self.pg_interfaces)
+ macs_per_if = count / n_int
+ i = -1
+ for pg_if in self.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i + start
+ end_nr = count + start if i == (n_int - 1) \
+ else macs_per_if * (i + 1) + start
+ hosts = self.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(start_nr, end_nr):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ def config_l2_fib_entries(self, count, start=0):
+ """
+ Create required number of L2 FIB entries.
+
+ :param int count: Number of L2 FIB entries to be created.
+ :param int start: Starting index of the host list. (Default value = 0)
+ """
+ n_int = len(self.pg_interfaces)
+ percent = 0
+ counter = 0.0
+ for pg_if in self.pg_interfaces:
+ end_nr = start + count / n_int
+ for j in range(start, end_nr):
+ host = self.hosts_by_pg_idx[pg_if.sw_if_index][j]
+ self.vapi.l2fib_add_del(host.mac, self.bd_id, pg_if.sw_if_index,
+ static_mac=1)
+ counter += 1
+ percentage = counter / count * 100
+ if percentage > percent:
+ self.logger.info("Configure %d L2 FIB entries .. %d%% done"
+ % (count, percentage))
+ percent += 1
+ self.logger.info(self.vapi.ppcli("show l2fib"))
+
+ def delete_l2_fib_entry(self, count):
+ """
+ Delete required number of L2 FIB entries.
+
+        :param int count: Number of L2 FIB entries to be deleted.
+ """
+ n_int = len(self.pg_interfaces)
+ percent = 0
+ counter = 0.0
+ for pg_if in self.pg_interfaces:
+ for j in range(count / n_int):
+ host = self.hosts_by_pg_idx[pg_if.sw_if_index][0]
+ self.vapi.l2fib_add_del(host.mac, self.bd_id, pg_if.sw_if_index,
+ is_add=0)
+ self.deleted_hosts_by_pg_idx[pg_if.sw_if_index].append(host)
+ del self.hosts_by_pg_idx[pg_if.sw_if_index][0]
+ counter += 1
+ percentage = counter / count * 100
+ if percentage > percent:
+ self.logger.info("Delete %d L2 FIB entries .. %d%% done"
+ % (count, percentage))
+ percent += 1
+ self.logger.info(self.vapi.ppcli("show l2fib"))
+
+ def create_stream(self, src_if, packet_sizes, deleted=False):
+ """
+ Create input packet stream for defined interface using hosts or
+ deleted_hosts list.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :param boolean deleted: Set to True if deleted_hosts list required.
+ :return: Stream of packets.
+ """
+ pkts = []
+ src_hosts = self.hosts_by_pg_idx[src_if.sw_if_index]
+ for dst_if in self.flows[src_if]:
+ dst_hosts = self.deleted_hosts_by_pg_idx[dst_if.sw_if_index]\
+ if deleted else self.hosts_by_pg_idx[dst_if.sw_if_index]
+ n_int = len(dst_hosts)
+ for i in range(0, n_int):
+ dst_host = dst_hosts[i]
+ src_host = random.choice(src_hosts)
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=dst_host.mac, src=src_host.mac) /
+ IP(src=src_host.ip4, dst=dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, pg_if, capture):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+                i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def run_verify_test(self):
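+        """Send streams between all pg interfaces and verify that every
+        packet arrives on its destination interface."""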
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ for i in self.pg_interfaces:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes)
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for i in self.pg_interfaces:
+ capture = i.get_capture()
+ self.logger.info("Verifying capture on interface %s" % i.name)
+ self.verify_capture(i, capture)
+
+ def run_verify_negat_test(self):
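+        """Send streams addressed to deleted MAC entries and verify that no
+        packets are forwarded."""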
+ # Test
+ # Create incoming packet streams for packet-generator interfaces for
+ # deleted MAC addresses
+ self.packet_infos = {}
+ for i in self.pg_interfaces:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes, deleted=True)
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for i in self.pg_interfaces:
+ i.assert_nothing_captured(remark="outgoing interface")
+
+ def test_l2_fib_01(self):
+ """ L2 FIB test 1 - program 100 MAC addresses
+ """
+ # Config 1
+ # Create test host entries
+ self.create_hosts(100)
+
+ # Add first 100 MAC entries to L2 FIB
+ self.config_l2_fib_entries(100)
+
+ # Test 1
+ self.run_verify_test()
+
+ def test_l2_fib_02(self):
+ """ L2 FIB test 2 - delete 12 MAC entries
+ """
+ # Config 2
+ # Delete 12 MAC entries (3 per interface) from L2 FIB
+ self.delete_l2_fib_entry(12)
+
+ # Test 2a
+ self.run_verify_test()
+
+        # Test 2b
+ self.run_verify_negat_test()
+
+ def test_l2_fib_03(self):
+ """ L2 FIB test 3 - program new 100 MAC addresses
+ """
+ # Config 3
+ # Create new test host entries
+ self.create_hosts(100, start=100)
+
+ # Add new 100 MAC entries to L2 FIB
+ self.config_l2_fib_entries(100, start=22)
+
+ # Test 3
+ self.run_verify_test()
+
+ def test_l2_fib_04(self):
+ """ L2 FIB test 4 - delete 160 MAC entries
+ """
+ # Config 4
+ # Delete 160 MAC entries (40 per interface) from L2 FIB
+ self.delete_l2_fib_entry(160)
+
+        # Test 4b
+ self.run_verify_negat_test()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_l2bd.py b/vpp/test/test_l2bd.py
new file mode 100644
index 00000000..50720e64
--- /dev/null
+++ b/vpp/test/test_l2bd.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether, Dot1Q
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+from vpp_sub_interface import VppDot1QSubint, VppDot1ADSubint
+
+
+class TestL2bd(VppTestCase):
+ """ L2BD Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+
+ :var int bd_id: Bridge domain ID.
+ :var int mac_entries_count: Number of MAC entries for bridge-domain to
+ learn.
+ :var int dot1q_tag: VLAN tag for dot1q sub-interface.
+ :var int dot1ad_sub_id: SubID of dot1ad sub-interface.
+ :var int dot1ad_outer_tag: VLAN S-tag for dot1ad sub-interface.
+ :var int dot1ad_inner_tag: VLAN C-tag for dot1ad sub-interface.
+ :var int sl_pkts_per_burst: Number of packets in burst for single-loop
+ test.
+ :var int dl_pkts_per_burst: Number of packets in burst for dual-loop
+ test.
+ """
+ super(TestL2bd, cls).setUpClass()
+
+ # Test variables
+ cls.bd_id = 1
+ cls.mac_entries_count = 100
+ # cls.dot1q_sub_id = 100
+ cls.dot1q_tag = 100
+ cls.dot1ad_sub_id = 20
+ cls.dot1ad_outer_tag = 200
+ cls.dot1ad_inner_tag = 300
+ cls.sl_pkts_per_burst = 2
+ cls.dl_pkts_per_burst = 257
+
+ try:
+ # create 3 pg interfaces
+ cls.create_pg_interfaces(range(3))
+
+ # create 2 sub-interfaces for pg1 and pg2
+ cls.sub_interfaces = [
+ VppDot1QSubint(cls, cls.pg1, cls.dot1q_tag),
+ VppDot1ADSubint(cls, cls.pg2, cls.dot1ad_sub_id,
+ cls.dot1ad_outer_tag, cls.dot1ad_inner_tag)]
+
+ # packet flows mapping pg0 -> pg1, pg2, etc.
+ cls.flows = dict()
+ cls.flows[cls.pg0] = [cls.pg1, cls.pg2]
+ cls.flows[cls.pg1] = [cls.pg0, cls.pg2]
+ cls.flows[cls.pg2] = [cls.pg0, cls.pg1]
+
+ # packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
+ cls.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
+
+ cls.interfaces = list(cls.pg_interfaces)
+ cls.interfaces.extend(cls.sub_interfaces)
+
+ # Create BD with MAC learning enabled and put interfaces and
+ # sub-interfaces to this BD
+ for pg_if in cls.pg_interfaces:
+ sw_if_index = pg_if.sub_if.sw_if_index \
+ if hasattr(pg_if, 'sub_if') else pg_if.sw_if_index
+ cls.vapi.sw_interface_set_l2_bridge(sw_if_index,
+ bd_id=cls.bd_id)
+
+ # setup all interfaces
+ for i in cls.interfaces:
+ i.admin_up()
+
+ # mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+
+ # create test host entries and inject packets to learn MAC entries
+ # in the bridge-domain
+ cls.create_hosts_and_learn(cls.mac_entries_count)
+ cls.logger.info(cls.vapi.ppcli("show l2fib"))
+
+ except Exception:
+ super(TestL2bd, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ """
+ Clear trace and packet infos before running each test.
+ """
+ super(TestL2bd, self).setUp()
+ self.packet_infos = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestL2bd, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.ppcli("show l2fib verbose"))
+ self.logger.info(self.vapi.ppcli("show bridge-domain %s detail" %
+ self.bd_id))
+
+ @classmethod
+ def create_hosts_and_learn(cls, count):
+ """
+ Create required number of host MAC addresses and distribute them among
+ interfaces. Create host IPv4 address for every host MAC address. Create
+ L2 MAC packet stream with host MAC addresses per interface to let
+ the bridge domain learn these MAC addresses.
+
+ :param count: Integer number of hosts to create MAC/IPv4 addresses for.
+ """
+ n_int = len(cls.pg_interfaces)
+ macs_per_if = count / n_int
+ i = -1
+ for pg_if in cls.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i
+ end_nr = count if i == (n_int - 1) else macs_per_if * (i + 1)
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+ hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
+ packets = []
+ for j in range(start_nr, end_nr):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j))
+ packet = (Ether(dst="ff:ff:ff:ff:ff:ff", src=host.mac))
+ hosts.append(host)
+ if hasattr(pg_if, 'sub_if'):
+ packet = pg_if.sub_if.add_dot1_layer(packet)
+ packets.append(packet)
+ pg_if.add_stream(packets)
+ cls.logger.info("Sending broadcast eth frames for MAC learning")
+ cls.pg_start()
+
+ def create_stream(self, src_if, packet_sizes, packets_per_burst):
+ """
+ Create input packet stream for defined interface.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :param int packets_per_burst: Number of packets in burst.
+ :return: Stream of packets.
+ """
+ pkts = []
+ for i in range(0, packets_per_burst):
+ dst_if = self.flows[src_if][i % 2]
+ dst_host = random.choice(self.hosts_by_pg_idx[dst_if.sw_if_index])
+ src_host = random.choice(self.hosts_by_pg_idx[src_if.sw_if_index])
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=dst_host.mac, src=src_host.mac) /
+ IP(src=src_host.ip4, dst=dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ if hasattr(src_if, 'sub_if'):
+ p = src_if.sub_if.add_dot1_layer(p)
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, pg_if, capture):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ src_sw_if_index = payload_info.src
+ src_if = None
+ for ifc in self.pg_interfaces:
+ if ifc != pg_if:
+ if ifc.sw_if_index == src_sw_if_index:
+ src_if = ifc
+ break
+ if hasattr(src_if, 'sub_if'):
+ # Check VLAN tags and Ethernet header
+ packet = src_if.sub_if.remove_dot1_layer(packet)
+ self.assertTrue(Dot1Q not in packet)
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+                i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def run_l2bd_test(self, pkts_per_burst):
+ """ L2BD MAC learning test """
+
+ # Create incoming packet streams for packet-generator interfaces
+ for i in self.pg_interfaces:
+ packet_sizes = self.sub_if_packet_sizes if hasattr(i, 'sub_if') \
+ else self.pg_if_packet_sizes
+ pkts = self.create_stream(i, packet_sizes, pkts_per_burst)
+ i.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify outgoing packet streams per packet-generator interface
+ for i in self.pg_interfaces:
+ capture = i.get_capture()
+ self.logger.info("Verifying capture on interface %s" % i.name)
+ self.verify_capture(i, capture)
+
+ def test_l2bd_sl(self):
+ """ L2BD MAC learning single-loop test
+
+ Test scenario:
+ 1.config
+ MAC learning enabled
+            learn 100 MAC entries
+ 3 interfaces: untagged, dot1q, dot1ad (dot1q used instead of
+ dot1ad in the first version)
+
+        2.sending l2 eth pkts between 3 interfaces
+ 64B, 512B, 1518B, 9200B (ether_size)
+ burst of 2 pkts per interface
+ """
+
+ self.run_l2bd_test(self.sl_pkts_per_burst)
+
+ def test_l2bd_dl(self):
+ """ L2BD MAC learning dual-loop test
+
+ Test scenario:
+ 1.config
+ MAC learning enabled
+            learn 100 MAC entries
+ 3 interfaces: untagged, dot1q, dot1ad (dot1q used instead of
+ dot1ad in the first version)
+
+        2.sending l2 eth pkts between 3 interfaces
+ 64B, 512B, 1518B, 9200B (ether_size)
+ burst of 257 pkts per interface
+ """
+
+ self.run_l2bd_test(self.dl_pkts_per_burst)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_l2bd_multi_instance.py b/vpp/test/test_l2bd_multi_instance.py
new file mode 100644
index 00000000..1272d765
--- /dev/null
+++ b/vpp/test/test_l2bd_multi_instance.py
@@ -0,0 +1,498 @@
+#!/usr/bin/env python
+"""L2BD Multi-instance Test Case HLD:
+
+**NOTES:**
+ - a higher number of pg-l2 interfaces causes problems => only 15 pg-l2 \
+ interfaces in 5 bridge domains are tested
+ - jumbo packets in a configuration with 14 l2-pg interfaces lead to \
+ problems too
+
+**config 1**
+ - add 15 pg-l2 interfaces
+ - configure one host per pg-l2 interface
+ - configure 5 bridge domains (BD)
+ - add 3 pg-l2 interfaces per BD
+
+**test 1**
+ - send L2 MAC frames between all pg-l2 interfaces of all BDs
+
+**verify 1**
+ - check BD data by parsing output of bridge_domain_dump API command
+ - all packets received correctly
+
+**config 2**
+ - update data of 5 BD
+ - disable learning, forwarding, flooding and uu_flooding for BD1
+ - disable forwarding for BD2
+ - disable flooding for BD3
+ - disable uu_flooding for BD4
+ - disable learning for BD5
+
+**verify 2**
+ - check BD data by parsing output of bridge_domain_dump API command
+
+**config 3**
+ - delete 2 BDs
+
+**test 3**
+ - send L2 MAC frames between all pg-l2 interfaces of all BDs
+ - send L2 MAC frames between all pg-l2 interfaces formerly assigned to \
+ deleted BDs
+
+**verify 3**
+ - check BD data by parsing output of bridge_domain_dump API command
+ - all packets received correctly on all 3 pg-l2 interfaces assigned to BDs
+ - no packets received on any of the 3 pg-l2 interfaces of the deleted BDs
+
+**config 4**
+ - add 2 BDs
+ - add 3 pg-l2 interfaces per BD
+
+**test 4**
+ - send L2 MAC frames between all pg-l2 interfaces of all BDs
+
+**verify 4**
+ - check BD data by parsing output of bridge_domain_dump API command
+ - all packets received correctly
+
+**config 5**
+ - delete 5 BDs
+
+**verify 5**
+ - check BD data by parsing output of bridge_domain_dump API command
+"""
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+
+
+@unittest.skip("Crashes VPP")
+class TestL2bdMultiInst(VppTestCase):
+ """ L2BD Multi-instance Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(TestL2bdMultiInst, cls).setUpClass()
+
+ try:
+ # Create pg interfaces
+ cls.create_pg_interfaces(range(15))
+
+ # Packet flows mapping pg0 -> pg1, pg2 etc.
+ cls.flows = dict()
+ for i in range(0, len(cls.pg_interfaces), 3):
+ cls.flows[cls.pg_interfaces[i]] = [cls.pg_interfaces[i + 1],
+ cls.pg_interfaces[i + 2]]
+ cls.flows[cls.pg_interfaces[i + 1]] = [cls.pg_interfaces[i],
+ cls.pg_interfaces[i + 2]]
+ cls.flows[cls.pg_interfaces[i + 2]] = [cls.pg_interfaces[i],
+ cls.pg_interfaces[i + 1]]
+
+ # Mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # Create test host entries
+ cls.create_hosts(75)
+
+ # Packet sizes - jumbo packet (9018 bytes) skipped
+ cls.pg_if_packet_sizes = [64, 512, 1518]
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ # Create list of BDs
+ cls.bd_list = list()
+
+ # Create list of deleted BDs
+ cls.bd_deleted_list = list()
+
+ # Create list of pg_interfaces in BDs
+ cls.pg_in_bd = list()
+
+ # Create list of pg_interfaces not in BDs
+ cls.pg_not_in_bd = list()
+ for pg_if in cls.pg_interfaces:
+ cls.pg_not_in_bd.append(pg_if)
+
+ except Exception:
+ super(TestL2bdMultiInst, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ """
+ Clear trace and packet infos before running each test.
+ """
+ super(TestL2bdMultiInst, self).setUp()
+ self.packet_infos = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestL2bdMultiInst, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.ppcli("show l2fib verbose"))
+ self.logger.info(self.vapi.ppcli("show bridge-domain"))
+
+ @classmethod
+ def create_hosts(cls, count):
+ """
+ Create required number of host MAC addresses and distribute them among
+ interfaces. Create host IPv4 address for every host MAC address.
+
+ :param int count: Number of hosts to create MAC/IPv4 addresses for.
+ """
+ n_int = len(cls.pg_interfaces)
+ macs_per_if = count / n_int
+ i = -1
+ for pg_if in cls.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i
+ end_nr = count if i == (n_int - 1) else macs_per_if * (i + 1)
+ hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(start_nr, end_nr):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02u.%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ def create_bd_and_mac_learn(self, count, start=1):
+ """
+        Create required number of bridge domains with MAC learning enabled,
+        put 3 l2-pg interfaces into every bridge domain and send MAC learning
+        packets.
+
+ :param int count: Number of bridge domains to be created.
+ :param int start: Starting number of the bridge domain ID.
+ (Default value = 1)
+ """
+ for i in range(count):
+ bd_id = i + start
+ self.vapi.bridge_domain_add_del(bd_id=bd_id)
+ self.logger.info("Bridge domain ID %d created" % bd_id)
+ if self.bd_list.count(bd_id) == 0:
+ self.bd_list.append(bd_id)
+ if self.bd_deleted_list.count(bd_id) == 1:
+ self.bd_deleted_list.remove(bd_id)
+ for j in range(3):
+ pg_if = self.pg_interfaces[(i + start - 1) * 3 + j]
+ self.vapi.sw_interface_set_l2_bridge(pg_if.sw_if_index,
+ bd_id=bd_id)
+ self.logger.info("pg-interface %s added to bridge domain ID %d"
+ % (pg_if.name, bd_id))
+ self.pg_in_bd.append(pg_if)
+ self.pg_not_in_bd.remove(pg_if)
+ packets = []
+ for host in self.hosts_by_pg_idx[pg_if.sw_if_index]:
+ packet = (Ether(dst="ff:ff:ff:ff:ff:ff", src=host.mac))
+ packets.append(packet)
+ pg_if.add_stream(packets)
+ self.logger.info("Sending broadcast eth frames for MAC learning")
+ self.pg_start()
+ self.logger.info(self.vapi.ppcli("show bridge-domain"))
+ self.logger.info(self.vapi.ppcli("show l2fib"))
+
+ def delete_bd(self, count, start=1):
+ """
+ Delete required number of bridge domains.
+
+        :param int count: Number of bridge domains to be deleted.
+ :param int start: Starting number of the bridge domain ID.
+ (Default value = 1)
+ """
+ for i in range(count):
+ bd_id = i + start
+ self.vapi.bridge_domain_add_del(bd_id=bd_id, is_add=0)
+ if self.bd_list.count(bd_id) == 1:
+ self.bd_list.remove(bd_id)
+ if self.bd_deleted_list.count(bd_id) == 0:
+ self.bd_deleted_list.append(bd_id)
+ for j in range(3):
+ pg_if = self.pg_interfaces[(i + start - 1) * 3 + j]
+ self.pg_in_bd.remove(pg_if)
+ self.pg_not_in_bd.append(pg_if)
+ self.logger.info("Bridge domain ID %d deleted" % bd_id)
+
+ def create_stream(self, src_if, packet_sizes):
+ """
+ Create input packet stream for defined interface using hosts list.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :return: Stream of packets.
+ """
+ pkts = []
+ src_hosts = self.hosts_by_pg_idx[src_if.sw_if_index]
+ for dst_if in self.flows[src_if]:
+ dst_hosts = self.hosts_by_pg_idx[dst_if.sw_if_index]
+ n_int = len(dst_hosts)
+ for i in range(0, n_int):
+ dst_host = dst_hosts[i]
+ src_host = random.choice(src_hosts)
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=dst_host.mac, src=src_host.mac) /
+ IP(src=src_host.ip4, dst=dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
+ % (src_if.name, len(pkts)))
+ return pkts
+
+ def verify_capture(self, pg_if, capture):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+                i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def set_bd_flags(self, bd_id, **args):
+ """
+ Enable/disable defined feature(s) of the bridge domain.
+
+ :param int bd_id: Bridge domain ID.
+ :param list args: List of feature/status pairs. Allowed features: \
+ learn, forward, flood, uu_flood and arp_term. Status False means \
+ disable, status True means enable the feature.
+ :raise: ValueError in case of unknown feature in the input.
+ """
+ for flag in args:
+ if flag == "learn":
+ feature_bitmap = 1 << 0
+ elif flag == "forward":
+ feature_bitmap = 1 << 1
+ elif flag == "flood":
+ feature_bitmap = 1 << 2
+ elif flag == "uu_flood":
+ feature_bitmap = 1 << 3
+ elif flag == "arp_term":
+ feature_bitmap = 1 << 4
+ else:
+ raise ValueError("Unknown feature used: %s" % flag)
+ is_set = 1 if args[flag] else 0
+ self.vapi.bridge_flags(bd_id, is_set, feature_bitmap)
+ self.logger.info("Bridge domain ID %d updated" % bd_id)
+
+ def verify_bd(self, bd_id, **args):
+ """
+ Check if the bridge domain is configured and verify expected status
+ of listed features.
+
+ :param int bd_id: Bridge domain ID.
+ :param args: Feature/status pairs passed as keyword arguments. Allowed \
+ features: learn, forward, flood, uu_flood and arp_term. Status False \
+ disables the feature, status True enables it.
+ :return: 1 if the bridge domain is configured, otherwise 0.
+ :raise: ValueError in case of unknown feature in the input.
+ """
+ bd_dump = self.vapi.bridge_domain_dump(bd_id)
+ if len(bd_dump) == 0:
+ self.logger.info("Bridge domain ID %d is not configured" % bd_id)
+ return 0
+ else:
+ bd_dump = bd_dump[0]
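+ # The bridge_domain_details reply is indexed positionally here; the
+ # offsets below (3=flood, 4=uu_flood, 5=forward, 6=learn, 7=arp_term)
+ # reflect the field order of this API version.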
+ if len(args) > 0:
+ for flag in args:
+ expected_status = 1 if args[flag] else 0
+ if flag == "learn":
+ flag_status = bd_dump[6]
+ elif flag == "forward":
+ flag_status = bd_dump[5]
+ elif flag == "flood":
+ flag_status = bd_dump[3]
+ elif flag == "uu_flood":
+ flag_status = bd_dump[4]
+ elif flag == "arp_term":
+ flag_status = bd_dump[7]
+ else:
+ raise ValueError("Unknown feature used: %s" % flag)
+ self.assertEqual(expected_status, flag_status)
+ return 1
+
+ def run_verify_test(self):
+ """
+ Create packet streams for all configured l2-pg interfaces, send all \
+ prepared packet streams and verify that:
+ - all packets are received correctly on all pg-l2 interfaces
+ assigned to bridge domains
+ - no packet is received on any pg-l2 interface not assigned to
+ a bridge domain
+
+ :raise RuntimeError: if no packet captured on l2-pg interface assigned
+ to the bridge domain or if any packet is captured
+ on l2-pg interface not assigned to the bridge
+ domain.
+ """
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ for pg_if in self.pg_interfaces:
+ pkts = self.create_stream(pg_if, self.pg_if_packet_sizes)
+ pg_if.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for pg_if in self.pg_interfaces:
+ capture = pg_if.get_capture()
+ if pg_if in self.pg_in_bd:
+ if len(capture) == 0:
+ raise RuntimeError("Interface %s is in BD but the capture "
+ "is empty!" % pg_if.name)
+ self.verify_capture(pg_if, capture)
+ elif pg_if in self.pg_not_in_bd:
+ try:
+ self.assertEqual(len(capture), 0)
+ except AssertionError:
+ raise RuntimeError("Interface %s is not in BD but "
+ "the capture is not empty!" % pg_if.name)
+ else:
+ self.logger.error("Unknown interface: %s" % pg_if.name)
+
+ def test_l2bd_inst_01(self):
+ """ L2BD Multi-instance test 1 - create 5 BDs
+ """
+ # Config 1
+ # Create 5 BDs, put interfaces to these BDs and send MAC learning
+ # packets
+ self.create_bd_and_mac_learn(5)
+
+ # Verify 1
+ for bd_id in self.bd_list:
+ self.assertEqual(self.verify_bd(bd_id), 1)
+
+ # Test 1
+ # self.vapi.cli("clear trace")
+ self.run_verify_test()
+
+ def test_l2bd_inst_02(self):
+ """ L2BD Multi-instance test 2 - update data of 5 BDs
+ """
+ # Config 2
+ # Update data of 5 BDs (disable learn, forward, flood, uu-flood)
+ self.set_bd_flags(self.bd_list[0], learn=False, forward=False,
+ flood=False, uu_flood=False)
+ self.set_bd_flags(self.bd_list[1], forward=False)
+ self.set_bd_flags(self.bd_list[2], flood=False)
+ self.set_bd_flags(self.bd_list[3], uu_flood=False)
+ self.set_bd_flags(self.bd_list[4], learn=False)
+
+ # Verify 2
+ # Skipping check of uu_flood as it is not returned by
+ # bridge_domain_dump api command
+ self.verify_bd(self.bd_list[0], learn=False, forward=False,
+ flood=False, uu_flood=False)
+ self.verify_bd(self.bd_list[1], learn=True, forward=False,
+ flood=True, uu_flood=True)
+ self.verify_bd(self.bd_list[2], learn=True, forward=True,
+ flood=False, uu_flood=True)
+ self.verify_bd(self.bd_list[3], learn=True, forward=True,
+ flood=True, uu_flood=False)
+ self.verify_bd(self.bd_list[4], learn=False, forward=True,
+ flood=True, uu_flood=True)
+
+ def test_l2bd_inst_03(self):
+ """ L2BD Multi-instance 3 - delete 2 BDs
+ """
+ # Config 3
+ # Delete 2 BDs
+ self.delete_bd(2)
+
+ # Verify 3
+ for bd_id in self.bd_deleted_list:
+ self.assertEqual(self.verify_bd(bd_id), 0)
+ for bd_id in self.bd_list:
+ self.assertEqual(self.verify_bd(bd_id), 1)
+
+ # Test 3
+ self.run_verify_test()
+
+ def test_l2bd_inst_04(self):
+ """ L2BD Multi-instance test 4 - add 2 BDs
+ """
+ # Config 4
+ # Create 5 BDs, put interfaces to these BDs and send MAC learning
+ # packets
+ self.create_bd_and_mac_learn(2)
+
+ # Verify 4
+ for bd_id in self.bd_list:
+ self.assertEqual(self.verify_bd(bd_id), 1)
+
+ # Test 4
+ # self.vapi.cli("clear trace")
+ self.run_verify_test()
+
+ def test_l2bd_inst_05(self):
+ """ L2BD Multi-instance 5 - delete 5 BDs
+ """
+ # Config 5
+ # Delete 5 BDs
+ self.delete_bd(5)
+
+ # Verify 5
+ for bd_id in self.bd_deleted_list:
+ self.assertEqual(self.verify_bd(bd_id), 0)
+ for bd_id in self.bd_list:
+ self.assertEqual(self.verify_bd(bd_id), 1)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_l2xc.py b/vpp/test/test_l2xc.py
new file mode 100644
index 00000000..37893042
--- /dev/null
+++ b/vpp/test/test_l2xc.py
@@ -0,0 +1,230 @@
+#!/usr/bin/env python
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+
+
+class TestL2xc(VppTestCase):
+ """ L2XC Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+
+ :var int hosts_nr: Number of hosts to be created.
+ :var int dl_pkts_per_burst: Number of packets in burst for dual-loop
+ test.
+ :var int sl_pkts_per_burst: Number of packets in burst for single-loop
+ test.
+ """
+ super(TestL2xc, cls).setUpClass()
+
+ # Test variables
+ cls.hosts_nr = 10
+ cls.dl_pkts_per_burst = 257
+ cls.sl_pkts_per_burst = 2
+
+ try:
+ # create 4 pg interfaces
+ cls.create_pg_interfaces(range(4))
+
+ # packet flows mapping pg0 -> pg1, pg2 -> pg3, etc.
+ cls.flows = dict()
+ cls.flows[cls.pg0] = [cls.pg1]
+ cls.flows[cls.pg1] = [cls.pg0]
+ cls.flows[cls.pg2] = [cls.pg3]
+ cls.flows[cls.pg3] = [cls.pg2]
+
+ # packet sizes
+ cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
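+ # 64B/512B/1518B cover the minimum, a mid-size and the maximum
+ # standard Ethernet frame; 9018B exercises jumbo frames.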
+
+ cls.interfaces = list(cls.pg_interfaces)
+
+ # Create bi-directional cross-connects between pg0 and pg1
+ cls.vapi.sw_interface_set_l2_xconnect(
+ cls.pg0.sw_if_index, cls.pg1.sw_if_index, enable=1)
+ cls.vapi.sw_interface_set_l2_xconnect(
+ cls.pg1.sw_if_index, cls.pg0.sw_if_index, enable=1)
+
+ # Create bi-directional cross-connects between pg2 and pg3
+ cls.vapi.sw_interface_set_l2_xconnect(
+ cls.pg2.sw_if_index, cls.pg3.sw_if_index, enable=1)
+ cls.vapi.sw_interface_set_l2_xconnect(
+ cls.pg3.sw_if_index, cls.pg2.sw_if_index, enable=1)
+
+ # mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+
+ # Create host MAC and IPv4 lists
+ cls.create_host_lists(cls.hosts_nr)
+
+ # setup all interfaces
+ for i in cls.interfaces:
+ i.admin_up()
+
+ except Exception:
+ super(TestL2xc, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ """
+ Clear trace and packet infos before running each test.
+ """
+ super(TestL2xc, self).setUp()
+ self.packet_infos = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestL2xc, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.ppcli("show l2patch"))
+
+ @classmethod
+ def create_host_lists(cls, count):
+ """
+ Create the required number of host MAC addresses and distribute them
+ among interfaces. Create a host IPv4 address for every host MAC
+ address as well.
+
+ :param count: Number of hosts to create MAC and IPv4 addresses for.
+ """
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+ hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(0, count):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ def create_stream(self, src_if, packet_sizes, packets_per_burst):
+ """
+ Create input packet stream for defined interface.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :param int packets_per_burst: Number of packets in burst.
+ :return: Stream of packets.
+ """
+ pkts = []
+ for i in range(0, packets_per_burst):
+ dst_if = self.flows[src_if][0]
+ dst_host = random.choice(self.hosts_by_pg_idx[dst_if.sw_if_index])
+ src_host = random.choice(self.hosts_by_pg_idx[src_if.sw_if_index])
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=dst_host.mac, src=src_host.mac) /
+ IP(src=src_host.ip4, dst=dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, pg_if, capture):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ """
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't"
+ " arrive" % (dst_sw_if_index, i.sw_if_index))
+
+ def run_l2xc_test(self, pkts_per_burst):
+ """ L2XC test """
+
+ # Create incoming packet streams for packet-generator interfaces
+ for i in self.interfaces:
+ pkts = self.create_stream(i, self.pg_if_packet_sizes,
+ pkts_per_burst)
+ i.add_stream(pkts)
+
+ # Enable packet capturing and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify outgoing packet streams per packet-generator interface
+ for i in self.pg_interfaces:
+ capture = i.get_capture()
+ self.logger.info("Verifying capture on interface %s" % i.name)
+ self.verify_capture(i, capture)
+
+ def test_l2xc_sl(self):
+ """ L2XC single-loop test
+
+ Test scenario:
+ 1. config
+ 2 pairs of 2 interfaces, l2xconnected
+
+ 2. sending l2 eth packets between 4 interfaces
+ 64B, 512B, 1518B, 9018B (ether_size)
+ burst of 2 packets per interface
+ """
+
+ self.run_l2xc_test(self.sl_pkts_per_burst)
+
+ def test_l2xc_dl(self):
+ """ L2XC dual-loop test
+
+ Test scenario:
+ 1. config
+ 2 pairs of 2 interfaces, l2xconnected
+
+ 2. sending l2 eth packets between 4 interfaces
+ 64B, 512B, 1518B, 9018B (ether_size)
+ burst of 257 packets per interface
+ """
+
+ self.run_l2xc_test(self.dl_pkts_per_burst)
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_l2xc_multi_instance.py b/vpp/test/test_l2xc_multi_instance.py
new file mode 100644
index 00000000..2e55674e
--- /dev/null
+++ b/vpp/test/test_l2xc_multi_instance.py
@@ -0,0 +1,345 @@
+#!/usr/bin/env python
+"""L2XC Multi-instance Test Case HLD:
+
+**NOTES:**
+ - a higher number (more than 15) of pg-l2 interfaces causes problems, so \
+ only 14 pg-l2 interfaces and 10 cross-connects are tested
+ - jumbo packets in a configuration with 14 l2-pg interfaces lead to \
+ problems too
+
+**config 1**
+ - add 14 pg-l2 interfaces
+ - add 10 cross-connects (two cross-connects per pair of l2-pg interfaces)
+
+**test 1**
+ - send L2 MAC frames between all pairs of pg-l2 interfaces
+
+**verify 1**
+ - all packets received correctly in case of cross-connected l2-pg interfaces
+ - no packet received in case of not cross-connected l2-pg interfaces
+
+**config 2**
+ - delete 4 cross-connects
+
+**test 2**
+ - send L2 MAC frames between all pairs of pg-l2 interfaces
+
+**verify 2**
+ - all packets received correctly in case of cross-connected l2-pg interfaces
+ - no packet received in case of not cross-connected l2-pg interfaces
+
+**config 3**
+ - add new 4 cross-connects
+
+**test 3**
+ - send L2 MAC frames between all pairs of pg-l2 interfaces
+
+**verify 3**
+ - all packets received correctly in case of cross-connected l2-pg interfaces
+ - no packet received in case of not cross-connected l2-pg interfaces
+
+**config 4**
+ - delete 10 cross-connects
+
+**test 4**
+ - send L2 MAC frames between all pairs of pg-l2 interfaces
+
+**verify 4**
+ - no packet received on all of l2-pg interfaces (no cross-connect created)
+"""
+
+import unittest
+import random
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+
+
+class TestL2xcMultiInst(VppTestCase):
+ """ L2XC Multi-instance Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Perform standard class setup (defined by class method setUpClass in
+ class VppTestCase) before running the test case, set test case related
+ variables and configure VPP.
+ """
+ super(TestL2xcMultiInst, cls).setUpClass()
+
+ try:
+ # Create pg interfaces
+ cls.create_pg_interfaces(range(14))
+
+ # Packet flows mapping pg0 -> pg1 etc.
+ cls.flows = dict()
+ for i in range(len(cls.pg_interfaces)):
+ delta = 1 if i % 2 == 0 else -1
+ cls.flows[cls.pg_interfaces[i]] = [cls.pg_interfaces[i + delta]]
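+ # i.e. interfaces are paired with their neighbour: pg0<->pg1,
+ # pg2<->pg3, ... pg12<->pg13.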
+
+ # Mapping between packet-generator index and lists of test hosts
+ cls.hosts_by_pg_idx = dict()
+ for pg_if in cls.pg_interfaces:
+ cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
+
+ # Create test host entries
+ cls.create_hosts(70)
+
+ # Packet sizes - jumbo packet (9018 bytes) skipped
+ cls.pg_if_packet_sizes = [64, 512, 1518]
+
+ # Set up all interfaces
+ for i in cls.pg_interfaces:
+ i.admin_up()
+
+ # Create list of x-connected pg_interfaces
+ cls.pg_in_xc = list()
+
+ # Create list of not x-connected pg_interfaces
+ cls.pg_not_in_xc = list()
+ for pg_if in cls.pg_interfaces:
+ cls.pg_not_in_xc.append(pg_if)
+
+ except Exception:
+ super(TestL2xcMultiInst, cls).tearDownClass()
+ raise
+
+ def setUp(self):
+ """
+ Clear trace and packet infos before running each test.
+ """
+ super(TestL2xcMultiInst, self).setUp()
+ self.packet_infos = {}
+
+ def tearDown(self):
+ """
+ Show various debug prints after each test.
+ """
+ super(TestL2xcMultiInst, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.ppcli("show l2patch"))
+
+ @classmethod
+ def create_hosts(cls, count):
+ """
+ Create required number of host MAC addresses and distribute them among
+ interfaces. Create host IPv4 address for every host MAC address.
+
+ :param int count: Number of hosts to create MAC/IPv4 addresses for.
+ """
+ n_int = len(cls.pg_interfaces)
+ macs_per_if = count / n_int
+ i = -1
+ for pg_if in cls.pg_interfaces:
+ i += 1
+ start_nr = macs_per_if * i
+ end_nr = count if i == (n_int - 1) else macs_per_if * (i + 1)
+ hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(start_nr, end_nr):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02u.%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ def create_xconnects(self, count, start=0):
+ """
+ Create required number of cross-connects (always two cross-connects per
+ pair of packet-generator interfaces).
+
+ :param int count: Number of cross-connects to be created.
+ :param int start: Starting index of packet-generator interfaces. \
+ (Default value = 0)
+ """
+ for i in range(count):
+ rx_if = self.pg_interfaces[i + start]
+ delta = 1 if i % 2 == 0 else -1
+ tx_if = self.pg_interfaces[i + start + delta]
+ self.vapi.sw_interface_set_l2_xconnect(rx_if.sw_if_index,
+ tx_if.sw_if_index, 1)
+ self.logger.info("Cross-connect from %s to %s created"
+ % (tx_if.name, rx_if.name))
+ if self.pg_in_xc.count(rx_if) == 0:
+ self.pg_in_xc.append(rx_if)
+ if self.pg_not_in_xc.count(rx_if) == 1:
+ self.pg_not_in_xc.remove(rx_if)
+
+ def delete_xconnects(self, count, start=0):
+ """
+ Delete required number of cross-connects (always two cross-connects per
+ pair of packet-generator interfaces).
+
+ :param int count: Number of cross-connects to be deleted.
+ :param int start: Starting index of packet-generator interfaces. \
+ (Default value = 0)
+ """
+ for i in range(count):
+ rx_if = self.pg_interfaces[i + start]
+ delta = 1 if i % 2 == 0 else -1
+ tx_if = self.pg_interfaces[i + start + delta]
+ self.vapi.sw_interface_set_l2_xconnect(rx_if.sw_if_index,
+ tx_if.sw_if_index, 0)
+ self.logger.info("Cross-connect from %s to %s deleted"
+ % (tx_if.name, rx_if.name))
+ if self.pg_not_in_xc.count(rx_if) == 0:
+ self.pg_not_in_xc.append(rx_if)
+ if self.pg_in_xc.count(rx_if) == 1:
+ self.pg_in_xc.remove(rx_if)
+
+ def create_stream(self, src_if, packet_sizes):
+ """
+ Create input packet stream for defined interface using hosts list.
+
+ :param object src_if: Interface to create packet stream for.
+ :param list packet_sizes: List of required packet sizes.
+ :return: Stream of packets.
+ """
+ pkts = []
+ src_hosts = self.hosts_by_pg_idx[src_if.sw_if_index]
+ for dst_if in self.flows[src_if]:
+ dst_hosts = self.hosts_by_pg_idx[dst_if.sw_if_index]
+ n_int = len(dst_hosts)
+ for i in range(0, n_int):
+ dst_host = dst_hosts[i]
+ src_host = random.choice(src_hosts)
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=dst_host.mac, src=src_host.mac) /
+ IP(src=src_host.ip4, dst=dst_host.ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ size = random.choice(packet_sizes)
+ self.extend_packet(p, size)
+ pkts.append(p)
+ self.logger.debug("Input stream created for port %s. Length: %u pkt(s)"
+ % (src_if.name, len(pkts)))
+ return pkts
+
+ def verify_capture(self, pg_if, capture):
+ """
+ Verify captured input packet stream for defined interface.
+
+ :param object pg_if: Interface to verify captured packet stream for.
+ :param list capture: Captured packet stream.
+ """
+ last_info = dict()
+ for i in self.pg_interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = pg_if.sw_if_index
+ for packet in capture:
+ payload_info = self.payload_to_info(str(packet[Raw]))
+ try:
+ ip = packet[IP]
+ udp = packet[UDP]
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
+ (pg_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip.src, saved_packet[IP].src)
+ self.assertEqual(ip.dst, saved_packet[IP].dst)
+ self.assertEqual(udp.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ for i in self.pg_interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(
+ remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't arrive" %
+ (dst_sw_if_index, i.sw_if_index))
+
+ def run_verify_test(self):
+ """
+ Create packet streams for all configured l2-pg interfaces, send all \
+ prepared packet streams and verify that:
+ - all packets are received correctly on all pg-l2 interfaces
+ assigned to cross-connects
+ - no packet is received on any pg-l2 interface not assigned to
+ a cross-connect
+
+ :raise RuntimeError: if no packet captured on l2-pg interface assigned
+ to the cross-connect or if any packet is captured
+ on l2-pg interface not assigned to the
+ cross-connect.
+ """
+ # Test
+ # Create incoming packet streams for packet-generator interfaces
+ for pg_if in self.pg_interfaces:
+ pkts = self.create_stream(pg_if, self.pg_if_packet_sizes)
+ pg_if.add_stream(pkts)
+
+ # Enable packet capture and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # Verify
+ # Verify outgoing packet streams per packet-generator interface
+ for pg_if in self.pg_interfaces:
+ if pg_if in self.pg_in_xc:
+ capture = pg_if.get_capture(
+ remark="interface is a cross-connect sink")
+ self.verify_capture(pg_if, capture)
+ elif pg_if in self.pg_not_in_xc:
+ pg_if.assert_nothing_captured(
+ remark="interface is not a cross-connect sink")
+ else:
+ raise Exception("Unexpected interface: %s" % pg_if.name)
+
+ def test_l2xc_inst_01(self):
+ """ L2XC Multi-instance test 1 - create 10 cross-connects
+ """
+ # Config 1
+ # Create 10 cross-connects
+ self.create_xconnects(10)
+
+ # Test 1
+ self.run_verify_test()
+
+ def test_l2xc_inst_02(self):
+ """ L2XC Multi-instance test 2 - delete 4 cross-connects
+ """
+ # Config 2
+ # Delete 4 cross-connects
+ self.delete_xconnects(4)
+
+ # Test 2
+ self.run_verify_test()
+
+ def test_l2xc_inst_03(self):
+ """ L2BD Multi-instance 3 - add new 4 cross-connects
+ """
+ # Config 3
+ # Add new 4 cross-connects
+ self.create_xconnects(4, start=10)
+
+ # Test 3
+ self.run_verify_test()
+
+ def test_l2xc_inst_04(self):
+ """ L2XC Multi-instance test 4 - delete 10 cross-connects
+ """
+ # Config 4
+ # Delete 10 cross-connects
+ self.delete_xconnects(10, start=4)
+
+ # Test 4
+ self.run_verify_test()
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_lb.py b/vpp/test/test_lb.py
new file mode 100644
index 00000000..9b5baaea
--- /dev/null
+++ b/vpp/test/test_lb.py
@@ -0,0 +1,217 @@
+import socket
+
+from scapy.layers.inet import IP, UDP
+from scapy.layers.inet6 import IPv6
+from scapy.layers.l2 import Ether, GRE
+from scapy.packet import Raw
+
+from framework import VppTestCase
+from util import ppp
+
+""" TestLB is a subclass of VPPTestCase classes.
+
+ TestLB class defines Load Balancer test cases for:
+ - IP4 to GRE4 encap
+ - IP4 to GRE6 encap
+ - IP6 to GRE4 encap
+ - IP6 to GRE6 encap
+
+ As stated in comments below, GRE has issues with IPv6.
+ All test cases involving IPv6 are executed, but
+ received packets are not parsed and checked.
+
+"""
+
+
+class TestLB(VppTestCase):
+ """ Load Balancer Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestLB, cls).setUpClass()
+
+ cls.ass = range(5)
+ cls.packets = range(100)
+
+ try:
+ cls.create_pg_interfaces(range(2))
+ cls.interfaces = list(cls.pg_interfaces)
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.config_ip6()
+ i.disable_ipv6_ra()
+ i.resolve_arp()
+ i.resolve_ndp()
+ dst4 = socket.inet_pton(socket.AF_INET, "10.0.0.0")
+ dst6 = socket.inet_pton(socket.AF_INET6, "2002::")
+ cls.vapi.ip_add_del_route(dst4, 24, cls.pg1.remote_ip4n)
+ cls.vapi.ip_add_del_route(dst6, 16, cls.pg1.remote_ip6n, is_ipv6=1)
+ cls.vapi.cli("lb conf ip4-src-address 39.40.41.42")
+ cls.vapi.cli("lb conf ip6-src-address 2004::1")
+ except Exception:
+ super(TestLB, cls).tearDownClass()
+ raise
+
+ def tearDown(self):
+ super(TestLB, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show lb vip verbose"))
+
+ def getIPv4Flow(self, id):
+ return (IP(dst="90.0.%u.%u" % (id / 255, id % 255),
+ src="40.0.%u.%u" % (id / 255, id % 255)) /
+ UDP(sport=10000 + id, dport=20000 + id))
+
+ def getIPv6Flow(self, id):
+ return (IPv6(dst="2001::%u" % (id), src="fd00:f00d:ffff::%u" % (id)) /
+ UDP(sport=10000 + id, dport=20000 + id))
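+ # Each flow id varies both the IP addresses and the UDP ports, so the
+ # load balancer's hashing should spread the 100 flows across the
+ # configured application servers.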
+
+ def generatePackets(self, src_if, isv4):
+ self.packet_infos = {}
+ pkts = []
+ for pktid in self.packets:
+ info = self.create_packet_info(src_if.sw_if_index, pktid)
+ payload = self.info_to_payload(info)
+ ip = self.getIPv4Flow(pktid) if isv4 else self.getIPv6Flow(pktid)
+ packet = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ ip /
+ Raw(payload))
+ self.extend_packet(packet, 128)
+ info.data = packet.copy()
+ pkts.append(packet)
+ return pkts
+
+ def checkInner(self, gre, isv4):
+ IPver = IP if isv4 else IPv6
+ self.assertEqual(gre.proto, 0x0800 if isv4 else 0x86DD)
+ self.assertEqual(gre.flags, 0)
+ self.assertEqual(gre.version, 0)
+ inner = IPver(str(gre.payload))
+ payload_info = self.payload_to_info(str(inner[Raw]))
+ self.info = self.get_next_packet_info_for_interface2(
+ self.pg0.sw_if_index, payload_info.dst, self.info)
+ self.assertEqual(str(inner), str(self.info.data[IPver]))
+
+ def checkCapture(self, gre4, isv4):
+ self.pg0.assert_nothing_captured()
+ out = self.pg1.get_capture()
+ self.assertEqual(len(out), len(self.packets))
+
+ load = [0] * len(self.ass)
+ self.info = None
+ for p in out:
+ try:
+ asid = 0
+ gre = None
+ if gre4:
+ ip = p[IP]
+ asid = int(ip.dst.split(".")[3])
+ self.assertEqual(ip.version, 4)
+ self.assertEqual(ip.flags, 0)
+ self.assertEqual(ip.src, "39.40.41.42")
+ self.assertEqual(ip.dst, "10.0.0.%u" % asid)
+ self.assertEqual(ip.proto, 47)
+ self.assertEqual(len(ip.options), 0)
+ self.assertGreaterEqual(ip.ttl, 64)
+ gre = p[GRE]
+ else:
+ ip = p[IPv6]
+ asid = ip.dst.split(":")
+ asid = asid[len(asid) - 1]
+ asid = 0 if asid == "" else int(asid)
+ self.assertEqual(ip.version, 6)
+ self.assertEqual(ip.tc, 0)
+ self.assertEqual(ip.fl, 0)
+ self.assertEqual(ip.src, "2004::1")
+ self.assertEqual(
+ socket.inet_pton(socket.AF_INET6, ip.dst),
+ socket.inet_pton(socket.AF_INET6, "2002::%u" % asid)
+ )
+ self.assertEqual(ip.nh, 47)
+ self.assertGreaterEqual(ip.hlim, 64)
+ # self.assertEqual(len(ip.options), 0)
+ gre = GRE(str(p[IPv6].payload))
+ self.checkInner(gre, isv4)
+ load[asid] += 1
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # This is just to roughly check that the balancing algorithm
+ # is not completely biased.
+ for asid in self.ass:
+ if load[asid] < len(self.packets) / (len(self.ass) * 2):
+ self.logger.error(
+ "ASS is not balanced: load[%d] = %d" % (asid, load[asid]))
+ raise Exception("Load Balancer algorithm is biased")
+
+ def test_lb_ip4_gre4(self):
+ """ Load Balancer IP4 GRE4 """
+ try:
+ self.vapi.cli("lb vip 90.0.0.0/8 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli("lb as 90.0.0.0/8 10.0.0.%u" % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.checkCapture(gre4=True, isv4=True)
+
+ finally:
+ for asid in self.ass:
+ self.vapi.cli("lb as 90.0.0.0/8 10.0.0.%u del" % (asid))
+ self.vapi.cli("lb vip 90.0.0.0/8 encap gre4 del")
+
+ def test_lb_ip6_gre4(self):
+ """ Load Balancer IP6 GRE4 """
+
+ try:
+ self.vapi.cli("lb vip 2001::/16 encap gre4")
+ for asid in self.ass:
+ self.vapi.cli("lb as 2001::/16 10.0.0.%u" % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(gre4=True, isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli("lb as 2001::/16 10.0.0.%u del" % (asid))
+ self.vapi.cli("lb vip 2001::/16 encap gre4 del")
+
+ def test_lb_ip4_gre6(self):
+ """ Load Balancer IP4 GRE6 """
+ try:
+ self.vapi.cli("lb vip 90.0.0.0/8 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli("lb as 90.0.0.0/8 2002::%u" % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=True))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(gre4=False, isv4=True)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli("lb as 90.0.0.0/8 2002::%u" % (asid))
+ self.vapi.cli("lb vip 90.0.0.0/8 encap gre6 del")
+
+ def test_lb_ip6_gre6(self):
+ """ Load Balancer IP6 GRE6 """
+ try:
+ self.vapi.cli("lb vip 2001::/16 encap gre6")
+ for asid in self.ass:
+ self.vapi.cli("lb as 2001::/16 2002::%u" % (asid))
+
+ self.pg0.add_stream(self.generatePackets(self.pg0, isv4=False))
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.checkCapture(gre4=False, isv4=False)
+ finally:
+ for asid in self.ass:
+ self.vapi.cli("lb as 2001::/16 2002::%u del" % (asid))
+ self.vapi.cli("lb vip 2001::/16 encap gre6 del")
diff --git a/vpp/test/test_mpls.py b/vpp/test/test_mpls.py
new file mode 100644
index 00000000..6d5eeb2b
--- /dev/null
+++ b/vpp/test/test_mpls.py
@@ -0,0 +1,743 @@
+#!/usr/bin/env python
+
+import unittest
+import socket
+
+from framework import VppTestCase, VppTestRunner
+from vpp_ip_route import IpRoute, RoutePath, MplsRoute, MplsIpBind
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP, ICMP
+from scapy.layers.inet6 import IPv6
+from scapy.contrib.mpls import MPLS
+
+class TestMPLS(VppTestCase):
+ """ MPLS Test Case """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestMPLS, cls).setUpClass()
+
+ def setUp(self):
+ super(TestMPLS, self).setUp()
+
+ # create 2 pg interfaces
+ self.create_pg_interfaces(range(2))
+
+ # setup both interfaces
+ # assign them different tables.
+ table_id = 0
+
+ for i in self.pg_interfaces:
+ i.admin_up()
+ i.set_table_ip4(table_id)
+ i.set_table_ip6(table_id)
+ i.config_ip4()
+ i.resolve_arp()
+ i.config_ip6()
+ i.resolve_ndp()
+ i.enable_mpls()
+ table_id += 1
+
+ def tearDown(self):
+ super(TestMPLS, self).tearDown()
+
+ # the default of 64 matches the IP packet TTL default
+ def create_stream_labelled_ip4(self, src_if, mpls_labels, mpls_ttl=255,
+ ping=0, ip_itf=None):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = Ether(dst=src_if.local_mac, src=src_if.remote_mac)
+
+ for ii in range(len(mpls_labels)):
+ if ii == len(mpls_labels) - 1:
+ p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=1)
+ else:
+ p = p / MPLS(label=mpls_labels[ii], ttl=mpls_ttl, s=0)
+ if not ping:
+ p = (p / IP(src=src_if.local_ip4, dst=src_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ else:
+ p = (p / IP(src=ip_itf.remote_ip4,
+ dst=ip_itf.local_ip4) /
+ ICMP())
+
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_stream_ip4(self, src_if, dst_ip):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=dst_ip) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ def create_stream_labelled_ip6(self, src_if, mpls_label, mpls_ttl):
+ pkts = []
+ for i in range(0, 257):
+ info = self.create_packet_info(src_if.sw_if_index,
+ src_if.sw_if_index)
+ payload = self.info_to_payload(info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ MPLS(label=mpls_label, ttl=mpls_ttl) /
+ IPv6(src=src_if.remote_ip6, dst=src_if.remote_ip6) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ info.data = p.copy()
+ pkts.append(p)
+ return pkts
+
+ @staticmethod
+ def verify_filter(capture, sent):
+ if not len(capture) == len(sent):
+ # filter out any IPv6 RAs from the capture
+ for p in capture:
+ if p.haslayer(IPv6):
+ capture.remove(p)
+ return capture
+
+ def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the rx'd packet has the MPLS label popped
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x800)
+
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ if not ping_resp:
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+ else:
+ self.assertEqual(rx_ip.src, tx_ip.dst)
+ self.assertEqual(rx_ip.dst, tx_ip.src)
+
+ except:
+ raise
+
+ def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0):
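+ # mpls_labels lists the expected stack outermost-first; only the label
+ # at position 'num' is expected to carry the TTL value 'ttl', while
+ # the remaining labels are expected to keep the default TTL of 255.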
+ # the rx'd packet still carries its MPLS label stack (ethertype 0x8847)
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x8847)
+
+ rx_mpls = rx[MPLS]
+
+ for ii in range(len(mpls_labels)):
+ self.assertEqual(rx_mpls.label, mpls_labels[ii])
+ self.assertEqual(rx_mpls.cos, 0)
+ if ii == num:
+ self.assertEqual(rx_mpls.ttl, ttl)
+ else:
+ self.assertEqual(rx_mpls.ttl, 255)
+
+ if ii == len(mpls_labels) - 1:
+ self.assertEqual(rx_mpls.s, 1)
+ else:
+ # not end of stack
+ self.assertEqual(rx_mpls.s, 0)
+ # pop the label to expose the next
+ rx_mpls = rx_mpls[MPLS].payload
+
+ def verify_capture_labelled_ip4(self, src_if, capture, sent,
+ mpls_labels):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ # the MPLS TTL is copied from the IP
+ self.verify_mpls_stack(
+ rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1)
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ raise
+
+ def verify_capture_tunneled_ip4(self, src_if, capture, sent, mpls_labels):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+ tx_ip = tx[IP]
+ rx_ip = rx[IP]
+
+ # the MPLS TTL is 255 since it enters a new tunnel
+ self.verify_mpls_stack(
+ rx, mpls_labels, 255, len(mpls_labels) - 1)
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
+
+ except:
+ raise
+
+ def verify_capture_labelled(self, src_if, capture, sent,
+ mpls_labels, ttl=254, num=0):
+ try:
+ capture = self.verify_filter(capture, sent)
+
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ rx = capture[i]
+ self.verify_mpls_stack(rx, mpls_labels, ttl, num)
+ except:
+ raise
+
+ def verify_capture_ip6(self, src_if, capture, sent):
+ try:
+ self.assertEqual(len(capture), len(sent))
+
+ for i in range(len(capture)):
+ tx = sent[i]
+ rx = capture[i]
+
+ # the rx'd packet has the MPLS label popped
+ eth = rx[Ether]
+ self.assertEqual(eth.type, 0x86DD)
+
+ tx_ip = tx[IPv6]
+ rx_ip = rx[IPv6]
+
+ self.assertEqual(rx_ip.src, tx_ip.src)
+ self.assertEqual(rx_ip.dst, tx_ip.dst)
+ # IP processing post pop has decremented the TTL
+ self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
+
+ except:
+ raise
+
+ def test_swap(self):
+ """ MPLS label swap tests """
+
+ #
+ # A simple MPLS xconnect - eos label in label out
+ #
+ route_32_eos = MplsRoute(self, 32, 1,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[33])])
+ route_32_eos.add_vpp_config()
+
+ #
+ # a stream that matches the route for 10.0.0.1
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [32])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33])
+
+ #
+ # A simple MPLS xconnect - non-eos label in label out
+ #
+ route_32_neos = MplsRoute(self, 32, 0,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[33])])
+ route_32_neos.add_vpp_config()
+
+ #
+ # a stream that matches the route for 10.0.0.1
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [32, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [33, 99])
+
+ #
+ # An MPLS xconnect - EOS label in IP out
+ #
+ route_33_eos = MplsRoute(self, 33, 1,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[])])
+ route_33_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [33])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip4(self.pg0, rx, tx)
+
+ #
+ # An MPLS xconnect - non-EOS label in IP out - an invalid configuration
+ # so this traffic should be dropped.
+ #
+ route_33_neos = MplsRoute(self, 33, 0,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[])])
+ route_33_neos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [33, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg0.assert_nothing_captured(
+ remark="MPLS non-EOS packets popped and forwarded")
+
+ #
+ # A recursive EOS x-connect, which resolves through another x-connect
+ #
+ route_34_eos = MplsRoute(self, 34, 1,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=32,
+ labels=[44, 45])])
+ route_34_eos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [34])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 45])
+
+ #
+ # A recursive non-EOS x-connect, which resolves through another
+ # x-connect
+ #
+ route_34_neos = MplsRoute(self, 34, 0,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=32,
+ labels=[44, 46])])
+ route_34_neos.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [34, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ # it's the 2nd (counting from 0) label in the stack that is swapped
+ self.verify_capture_labelled(self.pg0, rx, tx, [33, 44, 46, 99], num=2)
+
+ #
+ # an recursive IP route that resolves through the recursive non-eos
+ # x-connect
+ #
+ ip_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_via_label=34,
+ labels=[55])])
+ ip_10_0_0_1.add_vpp_config()
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [33, 44, 46, 55])
+
+ ip_10_0_0_1.remove_vpp_config()
+ route_34_neos.remove_vpp_config()
+ route_34_eos.remove_vpp_config()
+ route_33_neos.remove_vpp_config()
+ route_33_eos.remove_vpp_config()
+ route_32_neos.remove_vpp_config()
+ route_32_eos.remove_vpp_config()
+
+ def test_bind(self):
+ """ MPLS Local Label Binding test """
+
+ #
+ # Add a non-recursive route with a single out label
+ #
+ route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[45])])
+ route_10_0_0_1.add_vpp_config()
+
+ # bind a local label to the route
+ binding = MplsIpBind(self, 44, "10.0.0.1", 32)
+ binding.add_vpp_config()
+
+ # non-EOS stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [44, 99])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [45, 99])
+
+ # EOS stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [44])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled(self.pg0, rx, tx, [45])
+
+ # IP stream
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [45])
+
+ #
+ # cleanup
+ #
+ binding.remove_vpp_config()
+ route_10_0_0_1.remove_vpp_config()
+
+ def test_imposition(self):
+ """ MPLS label imposition test """
+
+ #
+ # Add a non-recursive route with a single out label
+ #
+ route_10_0_0_1 = IpRoute(self, "10.0.0.1", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[32])])
+ route_10_0_0_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for 10.0.0.1
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32])
+
+ #
+ # Add a non-recursive route with a 3 out labels
+ #
+ route_10_0_0_2 = IpRoute(self, "10.0.0.2", 32,
+ [RoutePath(self.pg0.remote_ip4,
+ self.pg0.sw_if_index,
+ labels=[32, 33, 34])])
+ route_10_0_0_2.add_vpp_config()
+
+ #
+ # a stream that matches the route for 10.0.0.1
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.2")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 33, 34])
+
+ #
+ # add a recursive path, with output label, via the 1 label route
+ #
+ route_11_0_0_1 = IpRoute(self, "11.0.0.1", 32,
+ [RoutePath("10.0.0.1",
+ 0xffffffff,
+ labels=[44])])
+ route_11_0_0_1.add_vpp_config()
+
+ #
+ # a stream that matches the route for 11.0.0.1, should pick up
+ # the label stack for 11.0.0.1 and 10.0.0.1
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "11.0.0.1")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(self.pg0, rx, tx, [32, 44])
+
+ #
+ # add a recursive path, with 2 labels, via the 3 label route
+ #
+ route_11_0_0_2 = IpRoute(self, "11.0.0.2", 32,
+ [RoutePath("10.0.0.2",
+ 0xffffffff,
+ labels=[44, 45])])
+ route_11_0_0_2.add_vpp_config()
+
+ #
+ # a stream that matches the route for 11.0.0.1, should pick up
+ # the label stack for 11.0.0.1 and 10.0.0.1
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "11.0.0.2")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_labelled_ip4(
+ self.pg0, rx, tx, [32, 33, 34, 44, 45])
+
+ #
+ # cleanup
+ #
+ route_11_0_0_2.remove_vpp_config()
+ route_11_0_0_1.remove_vpp_config()
+ route_10_0_0_2.remove_vpp_config()
+ route_10_0_0_1.remove_vpp_config()
+
+ def test_tunnel(self):
+ """ MPLS Tunnel Tests """
+
+ #
+ # Create a tunnel with a single out label
+ #
+ nh_addr = socket.inet_pton(socket.AF_INET, self.pg0.remote_ip4)
+
+ reply = self.vapi.mpls_tunnel_add_del(
+ 0xffffffff, # don't know the if index yet
+ 1, # IPv4 next-hop
+ nh_addr,
+ self.pg0.sw_if_index,
+ 0, # next-hop-table-id
+ 1, # next-hop-weight
+ 2, # num-out-labels,
+ [44, 46])
+ self.vapi.sw_interface_set_flags(reply.sw_if_index, admin_up_down=1)
+
+ #
+ # add an unlabelled route through the new tunnel
+ #
+ dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.3")
+ nh_addr = socket.inet_pton(socket.AF_INET, "0.0.0.0")
+ dest_addr_len = 32
+
+ self.vapi.ip_add_del_route(
+ dest_addr,
+ dest_addr_len,
+ nh_addr, # all zeros next-hop - tunnel is p2p
+ reply.sw_if_index, # sw_if_index of the new tunnel
+ 0, # table-id
+ 0, # next-hop-table-id
+ 1, # next-hop-weight
+ 0, # num-out-labels,
+ []) # out-label
+
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_ip4(self.pg0, "10.0.0.3")
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_tunneled_ip4(self.pg0, rx, tx, [44, 46])
+
+ def test_v4_exp_null(self):
+ """ MPLS V4 Explicit NULL test """
+
+ #
+ # The first test case has an MPLS TTL of 0
+ # all packet should be dropped
+ #
+ tx = self.create_stream_labelled_ip4(self.pg0, [0], 0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg0.assert_nothing_captured(remark="MPLS TTL=0 packets forwarded")
+
+ #
+ # a stream with a non-zero MPLS TTL
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [0])
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip4(self.pg0, rx, tx)
+
+ #
+ # a stream with a non-zero MPLS TTL
+ # PG1 is in table 1
+ # we are ensuring the post-pop lookup occurs in the VRF table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg1, [0])
+ self.pg1.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture()
+ self.verify_capture_ip4(self.pg0, rx, tx)
+
+ def test_v6_exp_null(self):
+ """ MPLS V6 Explicit NULL test """
+
+ #
+ # a stream with a non-zero MPLS TTL
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg0, 2, 2)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ #
+ # a stream with a non-zero MPLS TTL
+ # PG1 is in table 1
+ # we are ensuring the post-pop lookup occurs in the VRF table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip6(self.pg1, 2, 2)
+ self.pg1.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture()
+ self.verify_capture_ip6(self.pg0, rx, tx)
+
+ def test_deag(self):
+ """ MPLS Deagg """
+
+ #
+ # A de-agg route - next-hop lookup in default table
+ #
+ route_34_eos = MplsRoute(self, 34, 1,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=0)])
+ route_34_eos.add_vpp_config()
+
+ #
+ # ping an interface in the default table
+ # PG0 is in the default table
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [34], ping=1,
+ ip_itf=self.pg0)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg0.get_capture()
+ self.verify_capture_ip4(self.pg0, rx, tx, ping_resp=1)
+
+ #
+ # A de-agg route - next-hop lookup in non-default table
+ #
+ route_35_eos = MplsRoute(self, 35, 1,
+ [RoutePath("0.0.0.0",
+ 0xffffffff,
+ nh_table_id=1)])
+ route_35_eos.add_vpp_config()
+
+ #
+ # ping an interface in the non-default table
+ # PG0 is in the default table. packet arrive labelled in the
+ # default table and egress unlabelled in the non-default
+ #
+ self.vapi.cli("clear trace")
+ tx = self.create_stream_labelled_ip4(self.pg0, [35], ping=1,
+ ip_itf=self.pg1)
+ self.pg0.add_stream(tx)
+
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ rx = self.pg1.get_capture()
+ self.verify_capture_ip4(self.pg1, rx, tx, ping_resp=1)
+
+ route_35_eos.remove_vpp_config()
+ route_34_eos.remove_vpp_config()
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_snat.py b/vpp/test/test_snat.py
new file mode 100644
index 00000000..8985c3e4
--- /dev/null
+++ b/vpp/test/test_snat.py
@@ -0,0 +1,626 @@
+#!/usr/bin/env python
+
+import socket
+import unittest
+
+from framework import VppTestCase, VppTestRunner
+
+from scapy.layers.inet import IP, TCP, UDP, ICMP
+from scapy.layers.l2 import Ether
+from util import ppp
+
+
+class TestSNAT(VppTestCase):
+ """ SNAT Test Cases """
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSNAT, cls).setUpClass()
+
+ try:
+ cls.tcp_port_in = 6303
+ cls.tcp_port_out = 6303
+ cls.udp_port_in = 6304
+ cls.udp_port_out = 6304
+ cls.icmp_id_in = 6305
+ cls.icmp_id_out = 6305
+ cls.snat_addr = '10.0.0.3'
+
+ cls.create_pg_interfaces(range(7))
+ cls.interfaces = list(cls.pg_interfaces[0:4])
+
+ for i in cls.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ cls.pg0.generate_remote_hosts(2)
+ cls.pg0.configure_ipv4_neighbors()
+
+ cls.overlapping_interfaces = list(cls.pg_interfaces[4:7])
+
+ for i in cls.overlapping_interfaces:
+ i._local_ip4 = "172.16.255.1"
+ i._local_ip4n = socket.inet_pton(socket.AF_INET, i.local_ip4)
+ i._remote_hosts[0]._ip4 = "172.16.255.2"
+ i.set_table_ip4(i.sw_if_index)
+ i.config_ip4()
+ i.admin_up()
+ i.resolve_arp()
+
+ except Exception:
+ super(TestSNAT, cls).tearDownClass()
+ raise
+
+ def create_stream_in(self, in_if, out_if):
+ """
+ Create packet stream for inside network
+
+ :param in_if: Inside interface
+ :param out_if: Outside interface
+ """
+ pkts = []
+ # TCP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ TCP(sport=self.tcp_port_in))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ UDP(sport=self.udp_port_in))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=in_if.local_mac, src=in_if.remote_mac) /
+ IP(src=in_if.remote_ip4, dst=out_if.remote_ip4) /
+ ICMP(id=self.icmp_id_in, type='echo-request'))
+ pkts.append(p)
+
+ return pkts
+
+ def create_stream_out(self, out_if, dst_ip=None):
+ """
+ Create packet stream for outside network
+
+ :param out_if: Outside interface
+ :param dst_ip: Destination IP address (Default: the global SNAT address)
+ """
+ if dst_ip is None:
+ dst_ip = self.snat_addr
+ pkts = []
+ # TCP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip) /
+ TCP(dport=self.tcp_port_out))
+ pkts.append(p)
+
+ # UDP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip) /
+ UDP(dport=self.udp_port_out))
+ pkts.append(p)
+
+ # ICMP
+ p = (Ether(dst=out_if.local_mac, src=out_if.remote_mac) /
+ IP(src=out_if.remote_ip4, dst=dst_ip) /
+ ICMP(id=self.icmp_id_out, type='echo-reply'))
+ pkts.append(p)
+
+ return pkts
+
+ def verify_capture_out(self, capture, nat_ip=None, same_port=False,
+ packet_num=3):
+ """
+ Verify captured packets on outside network
+
+ :param capture: Captured packets
+ :param nat_ip: Translated IP address (Default: the global SNAT address)
+ :param same_port: Source port number is not translated (Default False)
+ :param packet_num: Expected number of packets (Default 3)
+ """
+ if nat_ip is None:
+ nat_ip = self.snat_addr
+ self.assertEqual(packet_num, len(capture))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].src, nat_ip)
+ if packet.haslayer(TCP):
+ if same_port:
+ self.assertEqual(packet[TCP].sport, self.tcp_port_in)
+ else:
+ self.assertNotEqual(packet[TCP].sport, self.tcp_port_in)
+ self.tcp_port_out = packet[TCP].sport
+ elif packet.haslayer(UDP):
+ if same_port:
+ self.assertEqual(packet[UDP].sport, self.udp_port_in)
+ else:
+ self.assertNotEqual(packet[UDP].sport, self.udp_port_in)
+ self.udp_port_out = packet[UDP].sport
+ else:
+ if same_port:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ else:
+ self.assertNotEqual(packet[ICMP].id, self.icmp_id_in)
+ self.icmp_id_out = packet[ICMP].id
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(outside network):", packet))
+ raise
+
+ def verify_capture_in(self, capture, in_if, packet_num=3):
+ """
+ Verify captured packets on inside network
+
+ :param capture: Captured packets
+ :param in_if: Inside interface
+ :param packet_num: Expected number of packets (Default 3)
+ """
+ self.assertEqual(packet_num, len(capture))
+ for packet in capture:
+ try:
+ self.assertEqual(packet[IP].dst, in_if.remote_ip4)
+ if packet.haslayer(TCP):
+ self.assertEqual(packet[TCP].dport, self.tcp_port_in)
+ elif packet.haslayer(UDP):
+ self.assertEqual(packet[UDP].dport, self.udp_port_in)
+ else:
+ self.assertEqual(packet[ICMP].id, self.icmp_id_in)
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet "
+ "(inside network):", packet))
+ raise
+
+ def clear_snat(self):
+ """
+ Clear SNAT configuration.
+ """
+ interfaces = self.vapi.snat_interface_dump()
+ for intf in interfaces:
+ self.vapi.snat_interface_add_del_feature(intf.sw_if_index,
+ intf.is_inside,
+ is_add=0)
+
+ static_mappings = self.vapi.snat_static_mapping_dump()
+ for sm in static_mappings:
+ self.vapi.snat_add_static_mapping(sm.local_ip_address,
+ sm.external_ip_address,
+ local_port=sm.local_port,
+ external_port=sm.external_port,
+ addr_only=sm.addr_only,
+ vrf_id=sm.vrf_id,
+ is_add=0)
+
+ addresses = self.vapi.snat_address_dump()
+ for addr in addresses:
+ self.vapi.snat_add_address_range(addr.ip_address,
+ addr.ip_address,
+ is_add=0)
+
+ def snat_add_static_mapping(self, local_ip, external_ip, local_port=0,
+ external_port=0, vrf_id=0, is_add=1):
+ """
+ Add/delete S-NAT static mapping
+
+ :param local_ip: Local IP address
+ :param external_ip: External IP address
+ :param local_port: Local port number (Optional)
+ :param external_port: External port number (Optional)
+ :param vrf_id: VRF ID (Default 0)
+ :param is_add: 1 if add, 0 if delete (Default add)
+ """
+ addr_only = 1
+ if local_port and external_port:
+ addr_only = 0
+ l_ip = socket.inet_pton(socket.AF_INET, local_ip)
+ e_ip = socket.inet_pton(socket.AF_INET, external_ip)
+ self.vapi.snat_add_static_mapping(l_ip, e_ip, local_port, external_port,
+ addr_only, vrf_id, is_add)
+
+ def snat_add_address(self, ip, is_add=1):
+ """
+ Add/delete S-NAT address
+
+ :param ip: IP address
+ :param is_add: 1 if add, 0 if delete (Default add)
+ """
+ snat_addr = socket.inet_pton(socket.AF_INET, ip)
+ self.vapi.snat_add_address_range(snat_addr, snat_addr, is_add)
+
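+ # In the tests below pg0 plays the inside interface and pg1 the
+ # outside interface unless a test states otherwise.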
+ def test_dynamic(self):
+ """ SNAT dynamic translation test """
+
+ self.snat_add_address(self.snat_addr)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_in(self):
+ """ SNAT 1:1 NAT initialized from inside network """
+
+ nat_ip = "10.0.0.10"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.snat_add_static_mapping(self.pg0.remote_ip4, nat_ip)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_out(capture, nat_ip, True)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, nat_ip)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_out(self):
+ """ SNAT 1:1 NAT initialized from outside network """
+
+ nat_ip = "10.0.0.20"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.snat_add_static_mapping(self.pg0.remote_ip4, nat_ip)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1, nat_ip)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_out(capture, nat_ip, True)
+
+ def test_static_with_port_in(self):
+ """ SNAT 1:1 NAT with port initialized from inside network """
+
+ self.tcp_port_out = 3606
+ self.udp_port_out = 3607
+ self.icmp_id_out = 3608
+
+ self.snat_add_address(self.snat_addr)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.tcp_port_in, self.tcp_port_out)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.udp_port_in, self.udp_port_out)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.icmp_id_in, self.icmp_id_out)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ def test_static_with_port_out(self):
+ """ SNAT 1:1 NAT with port initialized from outside network """
+
+ self.tcp_port_out = 30606
+ self.udp_port_out = 30607
+ self.icmp_id_out = 30608
+
+ self.snat_add_address(self.snat_addr)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.tcp_port_in, self.tcp_port_out)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.udp_port_in, self.udp_port_out)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, self.snat_addr,
+ self.icmp_id_in, self.icmp_id_out)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # out2in
+ pkts = self.create_stream_out(self.pg1)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out
+ pkts = self.create_stream_in(self.pg0, self.pg1)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_out(capture)
+
+ def test_static_vrf_aware(self):
+ """ SNAT 1:1 NAT VRF awareness """
+
+ nat_ip1 = "10.0.0.30"
+ nat_ip2 = "10.0.0.40"
+ self.tcp_port_out = 6303
+ self.udp_port_out = 6304
+ self.icmp_id_out = 6305
+
+ self.snat_add_static_mapping(self.pg4.remote_ip4, nat_ip1,
+ vrf_id=self.pg4.sw_if_index)
+ self.snat_add_static_mapping(self.pg0.remote_ip4, nat_ip2,
+ vrf_id=self.pg4.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg3.sw_if_index,
+ is_inside=0)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg4.sw_if_index)
+
+ # inside interface VRF match SNAT static mapping VRF
+ pkts = self.create_stream_in(self.pg4, self.pg3)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture, nat_ip1, True)
+
+ # inside interface VRF don't match SNAT static mapping VRF (packets
+ # are dropped)
+ pkts = self.create_stream_in(self.pg0, self.pg3)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg3.assert_nothing_captured()
+
+ def test_multiple_inside_interfaces(self):
+ """SNAT multiple inside interfaces with non-overlapping address space"""
+
+ self.snat_add_address(self.snat_addr)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg2.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg3.sw_if_index,
+ is_inside=0)
+
+ # in2out 1st interface
+ pkts = self.create_stream_in(self.pg0, self.pg3)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 1st interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.verify_capture_in(capture, self.pg0)
+
+ # in2out 2nd interface
+ pkts = self.create_stream_in(self.pg1, self.pg3)
+ self.pg1.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 2nd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg1.get_capture()
+ self.verify_capture_in(capture, self.pg1)
+
+ # in2out 3rd interface
+ pkts = self.create_stream_in(self.pg2, self.pg3)
+ self.pg2.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 3rd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg2.get_capture()
+ self.verify_capture_in(capture, self.pg2)
+
+ def test_inside_overlapping_interfaces(self):
+ """ SNAT multiple inside interfaces with overlapping address space """
+
+ self.snat_add_address(self.snat_addr)
+ self.vapi.snat_interface_add_del_feature(self.pg3.sw_if_index,
+ is_inside=0)
+ self.vapi.snat_interface_add_del_feature(self.pg4.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg5.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg6.sw_if_index)
+
+ # in2out 1st interface
+ pkts = self.create_stream_in(self.pg4, self.pg3)
+ self.pg4.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 1st interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg4.get_capture()
+ self.verify_capture_in(capture, self.pg4)
+
+ # in2out 2nd interface
+ pkts = self.create_stream_in(self.pg5, self.pg3)
+ self.pg5.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 2nd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg5.get_capture()
+ self.verify_capture_in(capture, self.pg5)
+
+ # in2out 3rd interface
+ pkts = self.create_stream_in(self.pg6, self.pg3)
+ self.pg6.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg3.get_capture()
+ self.verify_capture_out(capture)
+
+ # out2in 3rd interface
+ pkts = self.create_stream_out(self.pg3)
+ self.pg3.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg6.get_capture()
+ self.verify_capture_in(capture, self.pg6)
+
+ def test_hairpinning(self):
+ """ SNAT hairpinning """
+
+ host = self.pg0.remote_hosts[0]
+ server = self.pg0.remote_hosts[1]
+ host_in_port = 1234
+ host_out_port = 0
+ server_in_port = 5678
+ server_out_port = 8765
+
+ self.snat_add_address(self.snat_addr)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+ # add static mapping for server
+ self.snat_add_static_mapping(server.ip4, self.snat_addr,
+ server_in_port, server_out_port)
+
+ # send packet from host to server
+ p = (Ether(src=host.mac, dst=self.pg0.local_mac) /
+ IP(src=host.ip4, dst=self.snat_addr) /
+ TCP(sport=host_in_port, dport=server_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.assertEqual(1, len(capture))
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.snat_addr)
+ self.assertEqual(ip.dst, server.ip4)
+ self.assertNotEqual(tcp.sport, host_in_port)
+ self.assertEqual(tcp.dport, server_in_port)
+ host_out_port = tcp.sport
+ except:
+ self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ # send reply from server to host
+ p = (Ether(src=server.mac, dst=self.pg0.local_mac) /
+ IP(src=server.ip4, dst=self.snat_addr) /
+ TCP(sport=server_in_port, dport=host_out_port))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ capture = self.pg0.get_capture()
+ self.assertEqual(1, len(capture))
+ p = capture[0]
+ try:
+ ip = p[IP]
+ tcp = p[TCP]
+ self.assertEqual(ip.src, self.snat_addr)
+ self.assertEqual(ip.dst, host.ip4)
+ self.assertEqual(tcp.sport, server_out_port)
+ self.assertEqual(tcp.dport, host_in_port)
+ except:
+            self.logger.error(ppp("Unexpected or invalid packet:", p))
+ raise
+
+ def test_max_translations_per_user(self):
+ """ MAX translations per user - recycle the least recently used """
+
+ self.snat_add_address(self.snat_addr)
+ self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ # get maximum number of translations per user
+ snat_config = self.vapi.snat_show_config()
+
+ # send more than maximum number of translations per user packets
+ pkts_num = snat_config.max_translations_per_user + 5
+ pkts = []
+ for port in range(0, pkts_num):
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1025 + port))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ # verify number of translated packet
+ capture = self.pg1.get_capture()
+ self.assertEqual(pkts_num, len(capture))
+
+ def tearDown(self):
+ super(TestSNAT, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show snat verbose"))
+ self.clear_snat()
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_span.py b/vpp/test/test_span.py
new file mode 100644
index 00000000..e42fbd77
--- /dev/null
+++ b/vpp/test/test_span.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+import unittest
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from framework import VppTestCase, VppTestRunner
+from util import Host, ppp
+
+
+class TestSpan(VppTestCase):
+ """ SPAN Test Case """
+
+ # Test variables
+ hosts_nr = 10 # Number of hosts
+ pkts_per_burst = 257 # Number of packets per burst
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestSpan, cls).setUpClass()
+
+ def setUp(self):
+ super(TestSpan, self).setUp()
+
+ # create 3 pg interfaces
+ self.create_pg_interfaces(range(3))
+
+ # packet flows mapping pg0 -> pg1, pg2 -> pg3, etc.
+ self.flows = dict()
+ self.flows[self.pg0] = [self.pg1]
+
+ # packet sizes
+ self.pg_if_packet_sizes = [64, 512] # , 1518, 9018]
+
+ self.interfaces = list(self.pg_interfaces)
+
+ # Create host MAC and IPv4 lists
+ # self.MY_MACS = dict()
+ # self.MY_IP4S = dict()
+ self.create_host_lists(TestSpan.hosts_nr)
+
+ # Create bi-directional cross-connects between pg0 and pg1
+ self.vapi.sw_interface_set_l2_xconnect(
+ self.pg0.sw_if_index, self.pg1.sw_if_index, enable=1)
+ self.vapi.sw_interface_set_l2_xconnect(
+ self.pg1.sw_if_index, self.pg0.sw_if_index, enable=1)
+
+ # setup all interfaces
+ for i in self.interfaces:
+ i.admin_up()
+ i.config_ip4()
+ i.resolve_arp()
+
+ # Enable SPAN on pg0 (mirrored to pg2)
+ self.vapi.sw_interface_span_enable_disable(
+ self.pg0.sw_if_index, self.pg2.sw_if_index)
+
+ def tearDown(self):
+ super(TestSpan, self).tearDown()
+
+ def create_host_lists(self, count):
+ """ Method to create required number of MAC and IPv4 addresses.
+ Create required number of host MAC addresses and distribute them among
+ interfaces. Create host IPv4 address for every host MAC address too.
+
+ :param count: Number of hosts to create MAC and IPv4 addresses for.
+ """
+ # mapping between packet-generator index and lists of test hosts
+ self.hosts_by_pg_idx = dict()
+
+ for pg_if in self.pg_interfaces:
+ # self.MY_MACS[i.sw_if_index] = []
+ # self.MY_IP4S[i.sw_if_index] = []
+ self.hosts_by_pg_idx[pg_if.sw_if_index] = []
+ hosts = self.hosts_by_pg_idx[pg_if.sw_if_index]
+ for j in range(0, count):
+ host = Host(
+ "00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
+ "172.17.1%02x.%u" % (pg_if.sw_if_index, j))
+ hosts.append(host)
+
+ def create_stream(self, src_if, packet_sizes):
+ pkts = []
+ for i in range(0, TestSpan.pkts_per_burst):
+ dst_if = self.flows[src_if][0]
+ pkt_info = self.create_packet_info(
+ src_if.sw_if_index, dst_if.sw_if_index)
+ payload = self.info_to_payload(pkt_info)
+ p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
+ IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
+ UDP(sport=1234, dport=1234) /
+ Raw(payload))
+ pkt_info.data = p.copy()
+ size = packet_sizes[(i / 2) % len(packet_sizes)]
+ self.extend_packet(p, size)
+ pkts.append(p)
+ return pkts
+
+ def verify_capture(self, dst_if, capture_pg1, capture_pg2):
+ last_info = dict()
+ for i in self.interfaces:
+ last_info[i.sw_if_index] = None
+ dst_sw_if_index = dst_if.sw_if_index
+        if len(capture_pg1) != len(capture_pg2):
+            raise RuntimeError(
+                "Different number of outgoing and mirrored packets: %u != %u" %
+                (len(capture_pg1), len(capture_pg2)))
+ for pkt_pg1, pkt_pg2 in zip(capture_pg1, capture_pg2):
+ try:
+ ip1 = pkt_pg1[IP]
+ udp1 = pkt_pg1[UDP]
+ raw1 = pkt_pg1[Raw]
+
+                if pkt_pg1[Ether] != pkt_pg2[Ether]:
+                    raise RuntimeError("Different ethernet header of "
+                                       "outgoing and mirrored packet")
+                if ip1 != pkt_pg2[IP]:
+                    raise RuntimeError(
+                        "Different ip header of outgoing and mirrored packet")
+                if udp1 != pkt_pg2[UDP]:
+                    raise RuntimeError(
+                        "Different udp header of outgoing and mirrored packet")
+                if raw1 != pkt_pg2[Raw]:
+                    raise RuntimeError(
+                        "Different raw data of outgoing and mirrored packet")
+
+ payload_info = self.payload_to_info(str(raw1))
+ packet_index = payload_info.index
+ self.assertEqual(payload_info.dst, dst_sw_if_index)
+ self.logger.debug(
+ "Got packet on port %s: src=%u (id=%u)" %
+ (dst_if.name, payload_info.src, packet_index))
+ next_info = self.get_next_packet_info_for_interface2(
+ payload_info.src, dst_sw_if_index,
+ last_info[payload_info.src])
+ last_info[payload_info.src] = next_info
+ self.assertTrue(next_info is not None)
+ self.assertEqual(packet_index, next_info.index)
+ saved_packet = next_info.data
+ # Check standard fields
+ self.assertEqual(ip1.src, saved_packet[IP].src)
+ self.assertEqual(ip1.dst, saved_packet[IP].dst)
+ self.assertEqual(udp1.sport, saved_packet[UDP].sport)
+ self.assertEqual(udp1.dport, saved_packet[UDP].dport)
+ except:
+ self.logger.error("Unexpected or invalid packets:")
+ self.logger.error(ppp("pg1 packet:", pkt_pg1))
+ self.logger.error(ppp("pg2 packet:", pkt_pg2))
+ raise
+ for i in self.interfaces:
+ remaining_packet = self.get_next_packet_info_for_interface2(
+ i, dst_sw_if_index, last_info[i.sw_if_index])
+ self.assertTrue(remaining_packet is None,
+ "Port %u: Packet expected from source %u didn't"
+ " arrive" % (dst_sw_if_index, i.sw_if_index))
+
+ def test_span(self):
+ """ SPAN test
+
+ Test scenario:
+ 1. config
+ 3 interfaces, pg0 l2xconnected with pg1
+ 2. sending l2 eth packets between 2 interfaces (pg0, pg1) and
+ mirrored to pg2
+ 64B, 512B, 1518B, 9018B (ether_size)
+ burst of packets per interface
+ """
+
+ # Create incoming packet streams for packet-generator interfaces
+ pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
+ self.pg0.add_stream(pkts)
+
+ # Enable packet capturing and start packet sending
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+        # Verify the outgoing packet stream on pg1 and its mirror on pg2
+ self.logger.info("Verifying capture on interfaces %s and %s" %
+ (self.pg1.name, self.pg2.name))
+ self.verify_capture(self.pg1,
+ self.pg1.get_capture(),
+ self.pg2.get_capture())
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/test_vxlan.py b/vpp/test/test_vxlan.py
new file mode 100644
index 00000000..845b8175
--- /dev/null
+++ b/vpp/test/test_vxlan.py
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+import socket
+import unittest
+from framework import VppTestCase, VppTestRunner
+from template_bd import BridgeDomain
+
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+from scapy.layers.vxlan import VXLAN
+from scapy.utils import atol
+
+
+class TestVxlan(BridgeDomain, VppTestCase):
+ """ VXLAN Test Case """
+
+ def __init__(self, *args):
+ BridgeDomain.__init__(self)
+ VppTestCase.__init__(self, *args)
+
+ def encapsulate(self, pkt, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def encap_mcast(self, pkt, src_ip, src_mac, vni):
+ """
+ Encapsulate the original payload frame by adding VXLAN header with its
+ UDP, IP and Ethernet fields
+ """
+ return (Ether(src=src_mac, dst=self.mcast_mac4) /
+ IP(src=src_ip, dst=self.mcast_ip4) /
+ UDP(sport=self.dport, dport=self.dport, chksum=0) /
+ VXLAN(vni=vni, flags=self.flags) /
+ pkt)
+
+ def decapsulate(self, pkt):
+ """
+ Decapsulate the original payload frame by removing VXLAN header
+ """
+        # check that the I (valid VNI) flag is set
+ self.assertEqual(pkt[VXLAN].flags, int('0x8', 16))
+ return pkt[VXLAN].payload
+
+ # Method for checking VXLAN encapsulation.
+ #
+ def check_encapsulation(self, pkt, vni, local_only=False):
+ # TODO: add error messages
+ # Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
+ # by VPP using ARP.
+ self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
+ if not local_only:
+ self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
+ # Verify VXLAN tunnel source IP is VPP_IP and destination IP is MY_IP.
+ self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
+ if not local_only:
+ self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
+ # Verify UDP destination port is VXLAN 4789, source UDP port could be
+ # arbitrary.
+ self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # TODO: checksum check
+ # Verify VNI
+ self.assertEqual(pkt[VXLAN].vni, vni)
+
+ @staticmethod
+ def ip4_range(ip4n, s=10, e=20):
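+        # Note: Python 2 idiom - str(bytearray(...)) yields raw bytes, so
+        # 'base' holds the first three octets of the packed address and each
+        # generated item is a packed 4-byte IPv4 address whose last octet
+        # runs over [s, e).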
+ base = str(bytearray(ip4n)[:3])
+ return ((base + ip) for ip in str(bytearray(range(s, e))))
+
+ @classmethod
+ def create_vxlan_flood_test_bd(cls, vni):
+ # Create 10 ucast vxlan tunnels under bd
+ ip_range_start = 10
+ ip_range_end = 20
+ next_hop_address = cls.pg0.remote_ip4n
+ for dest_addr in cls.ip4_range(next_hop_address, ip_range_start,
+ ip_range_end):
+ # add host route so dest_addr will not be resolved
+ cls.vapi.ip_add_del_route(dest_addr, 32, next_hop_address)
+ r = cls.vapi.vxlan_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4n,
+ dst_addr=dest_addr,
+ vni=vni)
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index, bd_id=vni)
+
+ @classmethod
+ def add_del_mcast_load(cls, is_add):
+ ip_range_start = 10
+ ip_range_end = 210
+ for dest_addr in cls.ip4_range(cls.mcast_ip4n, ip_range_start,
+ ip_range_end):
+ vni = bytearray(dest_addr)[3]
+ cls.vapi.vxlan_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4n,
+ dst_addr=dest_addr,
+ mcast_sw_if_index=1,
+ vni=vni,
+ is_add=is_add)
+
+ @classmethod
+ def add_mcast_load(cls):
+ cls.add_del_mcast_load(is_add=1)
+
+ @classmethod
+ def del_mcast_load(cls):
+ cls.add_del_mcast_load(is_add=0)
+
+ # Class method to start the VXLAN test case.
+ # Overrides setUpClass method in VppTestCase class.
+ # Python try..except statement is used to ensure that the tear down of
+ # the class will be executed even if exception is raised.
+ # @param cls The class pointer.
+ @classmethod
+ def setUpClass(cls):
+ super(TestVxlan, cls).setUpClass()
+
+ try:
+ cls.dport = 4789
+ cls.flags = 0x8
+
+            # Create 4 pg interfaces.
+ cls.create_pg_interfaces(range(4))
+ for pg in cls.pg_interfaces:
+ pg.admin_up()
+
+ # Configure IPv4 addresses on VPP pg0.
+ cls.pg0.config_ip4()
+
+ # Resolve MAC address for VPP's IP address on pg0.
+ cls.pg0.resolve_arp()
+
+ # Our Multicast address
+ cls.mcast_ip4 = '239.1.1.1'
+ cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
+ iplong = atol(cls.mcast_ip4)
+ cls.mcast_mac4 = "01:00:5e:%02x:%02x:%02x" % (
+ (iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
+
+ # Create VXLAN VTEP on VPP pg0, and put vxlan_tunnel0 and pg1
+ # into BD.
+ cls.single_tunnel_bd = 1
+ r = cls.vapi.vxlan_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4n,
+ dst_addr=cls.pg0.remote_ip4n,
+ vni=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+ cls.vapi.sw_interface_set_l2_bridge(cls.pg1.sw_if_index,
+ bd_id=cls.single_tunnel_bd)
+
+ # Setup vni 2 to test multicast flooding
+ cls.mcast_flood_bd = 2
+ cls.create_vxlan_flood_test_bd(cls.mcast_flood_bd)
+ r = cls.vapi.vxlan_add_del_tunnel(
+ src_addr=cls.pg0.local_ip4n,
+ dst_addr=cls.mcast_ip4n,
+ mcast_sw_if_index=1,
+ vni=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(r.sw_if_index,
+ bd_id=cls.mcast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(cls.pg2.sw_if_index,
+ bd_id=cls.mcast_flood_bd)
+
+ # Add and delete mcast tunnels to check stability
+ cls.add_mcast_load()
+ cls.del_mcast_load()
+
+ # Setup vni 3 to test unicast flooding
+ cls.ucast_flood_bd = 3
+ cls.create_vxlan_flood_test_bd(cls.ucast_flood_bd)
+ cls.vapi.sw_interface_set_l2_bridge(cls.pg3.sw_if_index,
+ bd_id=cls.ucast_flood_bd)
+ except Exception:
+ super(TestVxlan, cls).tearDownClass()
+ raise
+
+ # Method to define VPP actions before tear down of the test case.
+ # Overrides tearDown method in VppTestCase class.
+ # @param self The object pointer.
+ def tearDown(self):
+ super(TestVxlan, self).tearDown()
+ if not self.vpp_dead:
+ self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 2 detail"))
+ self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
+ self.logger.info(self.vapi.cli("show vxlan tunnel"))
+
+
+if __name__ == '__main__':
+ unittest.main(testRunner=VppTestRunner)
diff --git a/vpp/test/util.py b/vpp/test/util.py
new file mode 100644
index 00000000..0ac23760
--- /dev/null
+++ b/vpp/test/util.py
@@ -0,0 +1,87 @@
+import socket
+import sys
+from abc import abstractmethod, ABCMeta
+from cStringIO import StringIO
+
+
+def ppp(headline, packet):
+ """ Return string containing the output of scapy packet.show() call. """
+ o = StringIO()
+ old_stdout = sys.stdout
+ sys.stdout = o
+ print(headline)
+ packet.show()
+ sys.stdout = old_stdout
+ return o.getvalue()
+
+
+def ppc(headline, capture, limit=10):
+ """ Return string containing ppp() printout for a capture.
+
+ :param headline: printed as first line of output
+ :param capture: packets to print
+ :param limit: limit the print to # of packets
+ """
+ if not capture:
+ return headline
+ result = headline + "\n"
+ count = 1
+ for p in capture:
+        result += ppp("Packet #%s:" % count, p)
+        count += 1
+        if count >= limit:
+            break
+    if limit < len(capture):
+        result += (
+            "Capture contains %s packets in total, of which %s were printed" %
+            (len(capture), limit))
+    return result
+
+
+class NumericConstant(object):
+ __metaclass__ = ABCMeta
+
+ desc_dict = {}
+
+ @abstractmethod
+ def __init__(self, value):
+ self._value = value
+
+ def __int__(self):
+ return self._value
+
+ def __long__(self):
+ return self._value
+
+ def __str__(self):
+ if self._value in self.desc_dict:
+ return self.desc_dict[self._value]
+ return ""
+
+
+class Host(object):
+ """ Generic test host "connected" to VPPs interface. """
+
+ @property
+ def mac(self):
+ """ MAC address """
+ return self._mac
+
+ @property
+ def ip4(self):
+ """ IPv4 address """
+ return self._ip4
+
+ @property
+ def ip4n(self):
+ """ IPv4 address """
+ return socket.inet_pton(socket.AF_INET, self._ip4)
+
+ @property
+ def ip6(self):
+ """ IPv6 address """
+ return self._ip6
+
+ def __init__(self, mac=None, ip4=None, ip6=None):
+ self._mac = mac
+ self._ip4 = ip4
+ self._ip6 = ip6
diff --git a/vpp/test/vpp_gre_interface.py b/vpp/test/vpp_gre_interface.py
new file mode 100644
index 00000000..58a68290
--- /dev/null
+++ b/vpp/test/vpp_gre_interface.py
@@ -0,0 +1,36 @@
+
+from vpp_interface import VppInterface
+import socket
+
+
+class VppGreInterface(VppInterface):
+ """
+ VPP GRE interface
+ """
+
+ def __init__(self, test, src_ip, dst_ip, outer_fib_id=0, is_teb=0):
+ """ Create VPP loopback interface """
+ self._sw_if_index = 0
+ super(VppGreInterface, self).__init__(test)
+ self._test = test
+ self.t_src = src_ip
+ self.t_dst = dst_ip
+ self.t_outer_fib = outer_fib_id
+ self.t_is_teb = is_teb
+
+ def add_vpp_config(self):
+ s = socket.inet_pton(socket.AF_INET, self.t_src)
+ d = socket.inet_pton(socket.AF_INET, self.t_dst)
+ r = self.test.vapi.gre_tunnel_add_del(s, d,
+ outer_fib_id=self.t_outer_fib,
+ is_teb=self.t_is_teb)
+ self._sw_if_index = r.sw_if_index
+ self.generate_remote_hosts()
+
+ def remove_vpp_config(self):
+ s = socket.inet_pton(socket.AF_INET, self.t_src)
+ d = socket.inet_pton(socket.AF_INET, self.t_dst)
+ self.unconfig()
+ r = self.test.vapi.gre_tunnel_add_del(s, d,
+ outer_fib_id=self.t_outer_fib,
+ is_add=0)
diff --git a/vpp/test/vpp_interface.py b/vpp/test/vpp_interface.py
new file mode 100644
index 00000000..78865108
--- /dev/null
+++ b/vpp/test/vpp_interface.py
@@ -0,0 +1,264 @@
+from abc import abstractmethod, ABCMeta
+import socket
+
+from util import Host
+
+
+class VppInterface(object):
+ """Generic VPP interface."""
+ __metaclass__ = ABCMeta
+
+ @property
+ def sw_if_index(self):
+ """Interface index assigned by VPP."""
+ return self._sw_if_index
+
+ @property
+ def remote_mac(self):
+ """MAC-address of the remote interface "connected" to this interface."""
+ return self._remote_hosts[0].mac
+
+ @property
+ def local_mac(self):
+ """MAC-address of the VPP interface."""
+ return self._local_mac
+
+ @property
+ def local_ip4(self):
+ """Local IPv4 address on VPP interface (string)."""
+ return self._local_ip4
+
+ @property
+ def local_ip4n(self):
+ """Local IPv4 address - raw, suitable as API parameter."""
+ return socket.inet_pton(socket.AF_INET, self._local_ip4)
+
+ @property
+ def remote_ip4(self):
+ """IPv4 address of remote peer "connected" to this interface."""
+ return self._remote_hosts[0].ip4
+
+ @property
+ def remote_ip4n(self):
+ """IPv4 address of remote peer - raw, suitable as API parameter."""
+ return socket.inet_pton(socket.AF_INET, self.remote_ip4)
+
+ @property
+ def local_ip6(self):
+ """Local IPv6 address on VPP interface (string)."""
+ return self._local_ip6
+
+ @property
+ def local_ip6n(self):
+ """Local IPv6 address - raw, suitable as API parameter."""
+ return socket.inet_pton(socket.AF_INET6, self.local_ip6)
+
+ @property
+ def remote_ip6(self):
+ """IPv6 address of remote peer "connected" to this interface."""
+ return self._remote_hosts[0].ip6
+
+ @property
+ def remote_ip6n(self):
+ """IPv6 address of remote peer - raw, suitable as API parameter"""
+ return socket.inet_pton(socket.AF_INET6, self.remote_ip6)
+
+ @property
+ def name(self):
+ """Name of the interface."""
+ return self._name
+
+ @property
+ def dump(self):
+ """RAW result of sw_interface_dump for this interface."""
+ return self._dump
+
+ @property
+ def test(self):
+ """Test case creating this interface."""
+ return self._test
+
+ @property
+ def remote_hosts(self):
+ """Remote hosts list"""
+ return self._remote_hosts
+
+ @remote_hosts.setter
+ def remote_hosts(self, value):
+ """
+ :param list value: List of remote hosts.
+ """
+ self._remote_hosts = value
+ self._hosts_by_mac = {}
+ self._hosts_by_ip4 = {}
+ self._hosts_by_ip6 = {}
+ for host in self._remote_hosts:
+ self._hosts_by_mac[host.mac] = host
+ self._hosts_by_ip4[host.ip4] = host
+ self._hosts_by_ip6[host.ip6] = host
+
+ def host_by_mac(self, mac):
+ """
+ :param mac: MAC address to find host by.
+ :return: Host object assigned to interface.
+ """
+ return self._hosts_by_mac[mac]
+
+ def host_by_ip4(self, ip):
+ """
+ :param ip: IPv4 address to find host by.
+ :return: Host object assigned to interface.
+ """
+ return self._hosts_by_ip4[ip]
+
+ def host_by_ip6(self, ip):
+ """
+ :param ip: IPv6 address to find host by.
+ :return: Host object assigned to interface.
+ """
+ return self._hosts_by_ip6[ip]
+
+ def generate_remote_hosts(self, count=1):
+ """Generate and add remote hosts for the interface.
+
+ :param int count: Number of generated remote hosts.
+ """
+ self._remote_hosts = []
+ self._hosts_by_mac = {}
+ self._hosts_by_ip4 = {}
+ self._hosts_by_ip6 = {}
+ for i in range(
+ 2, count + 2): # 0: network address, 1: local vpp address
+ mac = "02:%02x:00:00:ff:%02x" % (self.sw_if_index, i)
+ ip4 = "172.16.%u.%u" % (self.sw_if_index, i)
+ ip6 = "fd01:%04x::%04x" % (self.sw_if_index, i)
+ host = Host(mac, ip4, ip6)
+ self._remote_hosts.append(host)
+ self._hosts_by_mac[mac] = host
+ self._hosts_by_ip4[ip4] = host
+ self._hosts_by_ip6[ip6] = host
+
+ @abstractmethod
+ def __init__(self, test):
+ self._test = test
+
+ self._remote_hosts = []
+ self._hosts_by_mac = {}
+ self._hosts_by_ip4 = {}
+ self._hosts_by_ip6 = {}
+
+ self.generate_remote_hosts()
+
+ self._local_ip4 = "172.16.%u.1" % self.sw_if_index
+ self._local_ip4n = socket.inet_pton(socket.AF_INET, self.local_ip4)
+
+ self._local_ip6 = "fd01:%04x::1" % self.sw_if_index
+ self._local_ip6n = socket.inet_pton(socket.AF_INET6, self.local_ip6)
+
+ r = self.test.vapi.sw_interface_dump()
+ for intf in r:
+ if intf.sw_if_index == self.sw_if_index:
+ self._name = intf.interface_name.split(b'\0', 1)[0]
+ self._local_mac =\
+ ':'.join(intf.l2_address.encode('hex')[i:i + 2]
+ for i in range(0, 12, 2))
+ self._dump = intf
+ break
+ else:
+ raise Exception(
+ "Could not find interface with sw_if_index %d "
+ "in interface dump %s" %
+ (self.sw_if_index, repr(r)))
+
+ def config_ip4(self):
+ """Configure IPv4 address on the VPP interface."""
+ addr = self.local_ip4n
+ addr_len = 24
+ self.test.vapi.sw_interface_add_del_address(
+ self.sw_if_index, addr, addr_len)
+ self.has_ip4_config = True
+
+ def unconfig_ip4(self):
+ """Remove IPv4 address on the VPP interface"""
+ try:
+ if (self.has_ip4_config):
+ self.test.vapi.sw_interface_add_del_address(
+ self.sw_if_index,
+ self.local_ip4n,
+ 24, is_add=0)
+ except AttributeError:
+ self.has_ip4_config = False
+ self.has_ip4_config = False
+
+ def configure_ipv4_neighbors(self):
+ """For every remote host assign neighbor's MAC to IPv4 addresses."""
+ for host in self._remote_hosts:
+ macn = host.mac.replace(":", "").decode('hex')
+ ipn = host.ip4n
+ self.test.vapi.ip_neighbor_add_del(self.sw_if_index, macn, ipn)
+
+ def config_ip6(self):
+ """Configure IPv6 address on the VPP interface."""
+ addr = self._local_ip6n
+ addr_len = 64
+ self.test.vapi.sw_interface_add_del_address(
+ self.sw_if_index, addr, addr_len, is_ipv6=1)
+ self.has_ip6_config = True
+
+ def unconfig_ip6(self):
+ """Remove IPv6 address on the VPP interface"""
+ try:
+ if (self.has_ip6_config):
+ self.test.vapi.sw_interface_add_del_address(
+ self.sw_if_index,
+ self.local_ip6n,
+ 64, is_ipv6=1, is_add=0)
+ except AttributeError:
+ self.has_ip6_config = False
+ self.has_ip6_config = False
+
+ def unconfig(self):
+ """Unconfigure IPv6 and IPv4 address on the VPP interface"""
+ self.unconfig_ip4()
+ self.unconfig_ip6()
+
+ def set_table_ip4(self, table_id):
+ """Set the interface in a IPv4 Table.
+
+ .. note:: Must be called before configuring IP4 addresses."""
+ self.test.vapi.sw_interface_set_table(
+ self.sw_if_index, 0, table_id)
+
+ def set_table_ip6(self, table_id):
+ """Set the interface in a IPv6 Table.
+
+ .. note:: Must be called before configuring IP6 addresses.
+ """
+ self.test.vapi.sw_interface_set_table(
+ self.sw_if_index, 1, table_id)
+
+ def disable_ipv6_ra(self):
+ """Configure IPv6 RA suppress on the VPP interface."""
+ self.test.vapi.sw_interface_ra_suppress(self.sw_if_index)
+
+ def admin_up(self):
+ """Put interface ADMIN-UP."""
+ self.test.vapi.sw_interface_set_flags(self.sw_if_index, admin_up_down=1)
+
+ def add_sub_if(self, sub_if):
+ """Register a sub-interface with this interface.
+
+ :param sub_if: sub-interface
+ """
+ if not hasattr(self, 'sub_if'):
+ self.sub_if = sub_if
+ else:
+ if isinstance(self.sub_if, list):
+ self.sub_if.append(sub_if)
+ else:
+                self.sub_if = [self.sub_if, sub_if]
+
+ def enable_mpls(self):
+ """Enable MPLS on the VPP interface."""
+ self.test.vapi.sw_interface_enable_disable_mpls(
+ self.sw_if_index)
diff --git a/vpp/test/vpp_ip_route.py b/vpp/test/vpp_ip_route.py
new file mode 100644
index 00000000..75f400f1
--- /dev/null
+++ b/vpp/test/vpp_ip_route.py
@@ -0,0 +1,116 @@
+"""
+ IP Routes
+
+ object abstractions for representing IP routes in VPP
+"""
+
+import socket
+
+# from vnet/vnet/mpls/mpls_types.h
+MPLS_IETF_MAX_LABEL = 0xfffff
+MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
+
+
+class RoutePath:
+
+    def __init__(self, nh_addr, nh_sw_if_index, nh_table_id=0,
+                 labels=[], nh_via_label=MPLS_LABEL_INVALID):
+ self.nh_addr = socket.inet_pton(socket.AF_INET, nh_addr)
+ self.nh_itf = nh_sw_if_index
+ self.nh_table_id = nh_table_id
+ self.nh_via_label = nh_via_label
+ self.nh_labels = labels
+
+
+class IpRoute:
+ """
+ IP Route
+ """
+
+ def __init__(self, test, dest_addr,
+ dest_addr_len, paths, table_id=0):
+ self._test = test
+ self.paths = paths
+ self.dest_addr = socket.inet_pton(socket.AF_INET, dest_addr)
+ self.dest_addr_len = dest_addr_len
+ self.table_id = table_id
+
+ def add_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.ip_add_del_route(self.dest_addr,
+ self.dest_addr_len,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ next_hop_out_label_stack=path.nh_labels,
+ next_hop_n_out_labels=len(
+ path.nh_labels),
+ next_hop_via_label=path.nh_via_label)
+
+ def remove_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.ip_add_del_route(self.dest_addr,
+ self.dest_addr_len,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ is_add=0)
+
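+# Illustrative sketch (assumed usage from a test case; pg0 is hypothetical):
+#   r = IpRoute(self, "10.0.0.1", 32,
+#               [RoutePath(self.pg0.remote_ip4, self.pg0.sw_if_index)])
+#   r.add_vpp_config()      # push the route to VPP
+#   ...
+#   r.remove_vpp_config()   # and remove it again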
+
+class MplsIpBind:
+ """
+ MPLS to IP Binding
+ """
+
+ def __init__(self, test, local_label, dest_addr, dest_addr_len):
+ self._test = test
+ self.dest_addr = socket.inet_pton(socket.AF_INET, dest_addr)
+ self.dest_addr_len = dest_addr_len
+ self.local_label = local_label
+
+ def add_vpp_config(self):
+ self._test.vapi.mpls_ip_bind_unbind(self.local_label,
+ self.dest_addr,
+ self.dest_addr_len)
+
+ def remove_vpp_config(self):
+ self._test.vapi.mpls_ip_bind_unbind(self.local_label,
+ self.dest_addr,
+ self.dest_addr_len,
+ is_bind=0)
+
+
+class MplsRoute:
+ """
+ MPLS Route
+ """
+
+ def __init__(self, test, local_label, eos_bit, paths, table_id=0):
+ self._test = test
+ self.paths = paths
+ self.local_label = local_label
+ self.eos_bit = eos_bit
+ self.table_id = table_id
+
+ def add_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.mpls_route_add_del(self.local_label,
+ self.eos_bit,
+ 1,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ next_hop_out_label_stack=path.nh_labels,
+ next_hop_n_out_labels=len(
+ path.nh_labels),
+ next_hop_via_label=path.nh_via_label,
+ next_hop_table_id=path.nh_table_id)
+
+ def remove_vpp_config(self):
+ for path in self.paths:
+ self._test.vapi.mpls_route_add_del(self.local_label,
+ self.eos_bit,
+ 1,
+ path.nh_addr,
+ path.nh_itf,
+ table_id=self.table_id,
+ is_add=0)
diff --git a/vpp/test/vpp_lo_interface.py b/vpp/test/vpp_lo_interface.py
new file mode 100644
index 00000000..19ee1523
--- /dev/null
+++ b/vpp/test/vpp_lo_interface.py
@@ -0,0 +1,13 @@
+
+from vpp_interface import VppInterface
+
+
+class VppLoInterface(VppInterface):
+ """VPP loopback interface."""
+
+ def __init__(self, test, lo_index):
+ """ Create VPP loopback interface """
+ r = test.vapi.create_loopback()
+ self._sw_if_index = r.sw_if_index
+ super(VppLoInterface, self).__init__(test)
+ self._lo_index = lo_index
diff --git a/vpp/test/vpp_object.py b/vpp/test/vpp_object.py
new file mode 100644
index 00000000..2b71fc1f
--- /dev/null
+++ b/vpp/test/vpp_object.py
@@ -0,0 +1,79 @@
+from abc import ABCMeta, abstractmethod
+
+
+class VppObject(object):
+ """ Abstract vpp object """
+ __metaclass__ = ABCMeta
+
+ def __init__(self):
+ VppObjectRegistry().register(self)
+
+ @abstractmethod
+ def add_vpp_config(self):
+ """ Add the configuration for this object to vpp. """
+ pass
+
+ @abstractmethod
+ def query_vpp_config(self):
+ """Query the vpp configuration.
+
+ :return: True if the object is configured"""
+ pass
+
+ @abstractmethod
+ def remove_vpp_config(self):
+ """ Remove the configuration for this object from vpp. """
+ pass
+
+ @abstractmethod
+ def object_id(self):
+ """ Return a unique string representing this object. """
+ pass
+
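+# Minimal sketch of a concrete subclass (hypothetical, for illustration only):
+#   class VppThing(VppObject):
+#       def add_vpp_config(self):     ...call the relevant binary API...
+#       def remove_vpp_config(self):  ...call it again with is_add=0...
+#       def query_vpp_config(self):   ...return True/False based on a dump...
+#       def object_id(self):          return "thing-%d" % self.index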
+
+class VppObjectRegistry(object):
+ """ Class which handles automatic configuration cleanup. """
+ _shared_state = {}
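+    # Borg-style shared state: every instance shares the same __dict__, so the
+    # registry contents are effectively global across the test run (assumed
+    # intent).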
+
+ def __init__(self):
+ self.__dict__ = self._shared_state
+ if not hasattr(self, "_object_registry"):
+ self._object_registry = []
+ if not hasattr(self, "_object_dict"):
+ self._object_dict = dict()
+
+ def register(self, o):
+ """ Register an object in the registry. """
+        if o.object_id() not in self._object_dict:
+            self._object_registry.append(o)
+            self._object_dict[o.object_id()] = o
+ else:
+ print "not adding duplicate %s" % o
+
+ def remove_vpp_config(self, logger):
+ """
+ Remove configuration (if present) from vpp and then remove all objects
+ from the registry.
+ """
+ if not self._object_registry:
+ logger.info("No objects registered for auto-cleanup.")
+ return
+ logger.info("Removing VPP configuration for registered objects")
+ for o in reversed(self._object_registry):
+ if o.query_vpp_config():
+ logger.info("Removing %s", o)
+ o.remove_vpp_config()
+ else:
+ logger.info("Skipping %s, configuration not present", o)
+ failed = []
+ for o in self._object_registry:
+ if o.query_vpp_config():
+ failed.append(o)
+ self._object_registry = []
+ self._object_dict = dict()
+ if failed:
+ logger.error("Couldn't remove configuration for object(s):")
+ for x in failed:
+ logger.error(repr(x))
+ raise Exception("Couldn't remove configuration for object(s): %s" %
+ (", ".join(str(x) for x in failed)))
diff --git a/vpp/test/vpp_papi_provider.py b/vpp/test/vpp_papi_provider.py
new file mode 100644
index 00000000..3279a274
--- /dev/null
+++ b/vpp/test/vpp_papi_provider.py
@@ -0,0 +1,986 @@
+import os
+import fnmatch
+import time
+from hook import Hook
+from collections import deque
+
+# Sphinx creates auto-generated documentation by importing the python source
+# files and collecting the docstrings from them. The NO_VPP_PAPI flag allows the
+# vpp_papi_provider.py file to be importable without having to build the whole
+# vpp api if the user only wishes to generate the test documentation.
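+# (For example, one might export NO_VPP_PAPI=1 in the environment before
+# running the documentation build; this is an assumed workflow, not enforced
+# here.)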
+do_import = True
+try:
+ no_vpp_papi = os.getenv("NO_VPP_PAPI")
+ if no_vpp_papi == "1":
+ do_import = False
+except:
+ pass
+
+if do_import:
+ from vpp_papi import VPP
+
+# from vnet/vnet/mpls/mpls_types.h
+MPLS_IETF_MAX_LABEL = 0xfffff
+MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1
+
+
+class L2_VTR_OP:
+ L2_POP_1 = 3
+
+
+class VppPapiProvider(object):
+ """VPP-api provider using vpp-papi
+
+ @property hook: hook object providing before and after api/cli hooks
+
+
+ """
+
+ def __init__(self, name, shm_prefix, test_class):
+ self.hook = Hook("vpp-papi-provider")
+ self.name = name
+ self.shm_prefix = shm_prefix
+ self.test_class = test_class
+ jsonfiles = []
+
+ install_dir = os.getenv('VPP_TEST_INSTALL_PATH')
+ for root, dirnames, filenames in os.walk(install_dir):
+ for filename in fnmatch.filter(filenames, '*.api.json'):
+ jsonfiles.append(os.path.join(root, filename))
+
+ self.papi = VPP(jsonfiles)
+ self._events = deque()
+
+ def register_hook(self, hook):
+ """Replace hook registration with new hook
+
+ :param hook:
+
+ """
+ self.hook = hook
+
+ def collect_events(self):
+ """ Collect all events from the internal queue and clear the queue. """
+ e = self._events
+ self._events = deque()
+ return e
+
+ def wait_for_event(self, timeout, name=None):
+ """ Wait for and return next event. """
+ if self._events:
+ self.test_class.logger.debug("Not waiting, event already queued")
+ limit = time.time() + timeout
+ while time.time() < limit:
+ if self._events:
+ e = self._events.popleft()
+ if name and type(e).__name__ != name:
+ raise Exception(
+ "Unexpected event received: %s, expected: %s" %
+ (type(e).__name__, name))
+ self.test_class.logger.debug("Returning event %s:%s" %
+ (name, e))
+ return e
+ time.sleep(0) # yield
+ if name is not None:
+ raise Exception("Event %s did not occur within timeout" % name)
+ raise Exception("Event did not occur within timeout")
+
+ def __call__(self, name, event):
+ """ Enqueue event in the internal event queue. """
+ # FIXME use the name instead of relying on type(e).__name__ ?
+ # FIXME #2 if this throws, it is eaten silently, Ole?
+ self.test_class.logger.debug("New event: %s: %s" % (name, event))
+ self._events.append(event)
+
+ def connect(self):
+ """Connect the API to VPP"""
+ self.papi.connect(self.name, self.shm_prefix)
+ self.papi.register_event_callback(self)
+
+ def disconnect(self):
+ """Disconnect the API from VPP"""
+ self.papi.disconnect()
+
+ def api(self, api_fn, api_args, expected_retval=0):
+ """ Call API function and check it's return value.
+ Call the appropriate hooks before and after the API call
+
+ :param api_fn: API function to call
+ :param api_args: tuple of API function arguments
+ :param expected_retval: Expected return value (Default value = 0)
+ :returns: reply from the API
+
+ """
+ self.hook.before_api(api_fn.__name__, api_args)
+ reply = api_fn(**api_args)
+ if hasattr(reply, 'retval') and reply.retval != expected_retval:
+ msg = "API call failed, expected retval == %d, got %s" % (
+ expected_retval, repr(reply))
+ self.test_class.logger.info(msg)
+ raise Exception(msg)
+ self.hook.after_api(api_fn.__name__, api_args)
+ return reply
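+
+    # Illustrative sketch (hypothetical call site): the wrappers below all
+    # funnel through api(), e.g.
+    #   self.api(self.papi.show_version, {})
+    # runs hook.before_api/after_api around the call and raises if the reply's
+    # retval differs from expected_retval.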
+
+ def cli(self, cli):
+ """ Execute a CLI, calling the before/after hooks appropriately.
+
+ :param cli: CLI to execute
+ :returns: CLI output
+
+ """
+ self.hook.before_cli(cli)
+ cli += '\n'
+ r = self.papi.cli_inband(length=len(cli), cmd=cli)
+ self.hook.after_cli(cli)
+ if hasattr(r, 'reply'):
+ return r.reply.decode().rstrip('\x00')
+
+ def ppcli(self, cli):
+ """ Helper method to print CLI command in case of info logging level.
+
+ :param cli: CLI to execute
+ :returns: CLI output
+ """
+ return cli + "\n" + str(self.cli(cli))
+
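+    # Helper: pack an "aa:bb:cc:dd:ee:ff" string into the integer form used by
+    # the L2 FIB API below (48-bit MAC shifted into the upper bits of a u64 -
+    # an assumption about the binary API's expected layout).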
+ def _convert_mac(self, mac):
+ return int(mac.replace(":", ""), 16) << 16
+
+ def show_version(self):
+ """ """
+ return self.papi.show_version()
+
+ def pg_create_interface(self, pg_index):
+ """
+
+ :param pg_index:
+
+ """
+ return self.api(self.papi.pg_create_interface,
+ {"interface_id": pg_index})
+
+ def sw_interface_dump(self, filter=None):
+ """
+
+ :param filter: (Default value = None)
+
+ """
+ if filter is not None:
+ args = {"name_filter_valid": 1, "name_filter": filter}
+ else:
+ args = {}
+ return self.api(self.papi.sw_interface_dump, args)
+
+ def sw_interface_set_table(self, sw_if_index, is_ipv6, table_id):
+ """ Set the IPvX Table-id for the Interface
+
+ :param sw_if_index:
+ :param is_ipv6:
+ :param table_id:
+
+ """
+ return self.api(self.papi.sw_interface_set_table,
+ {'sw_if_index': sw_if_index, 'is_ipv6': is_ipv6,
+ 'vrf_id': table_id})
+
+ def sw_interface_add_del_address(self, sw_if_index, addr, addr_len,
+ is_ipv6=0, is_add=1, del_all=0):
+ """
+
+ :param addr: param is_ipv6: (Default value = 0)
+ :param sw_if_index:
+ :param addr_len:
+ :param is_ipv6: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param del_all: (Default value = 0)
+
+ """
+ return self.api(self.papi.sw_interface_add_del_address,
+ {'sw_if_index': sw_if_index,
+ 'is_add': is_add,
+ 'is_ipv6': is_ipv6,
+ 'del_all': del_all,
+ 'address_length': addr_len,
+ 'address': addr})
+
+ def sw_interface_enable_disable_mpls(self, sw_if_index,
+ is_enable=1):
+ """
+ Enable/Disable MPLS on the interface
+ :param sw_if_index:
+ :param is_enable: (Default value = 1)
+
+ """
+ return self.api(self.papi.sw_interface_set_mpls_enable,
+ {'sw_if_index': sw_if_index,
+ 'enable': is_enable})
+
+ def sw_interface_ra_suppress(self, sw_if_index):
+ return self.api(self.papi.sw_interface_ip6nd_ra_config,
+ {'sw_if_index': sw_if_index})
+
+ def vxlan_add_del_tunnel(
+ self,
+ src_addr,
+ dst_addr,
+ mcast_sw_if_index=0xFFFFFFFF,
+ is_add=1,
+ is_ipv6=0,
+ encap_vrf_id=0,
+ decap_next_index=0xFFFFFFFF,
+ vni=0):
+ """
+
+ :param dst_addr:
+ :param src_addr:
+ :param is_add: (Default value = 1)
+ :param is_ipv6: (Default value = 0)
+ :param encap_vrf_id: (Default value = 0)
+ :param decap_next_index: (Default value = 0xFFFFFFFF)
+ :param mcast_sw_if_index: (Default value = 0xFFFFFFFF)
+ :param vni: (Default value = 0)
+
+ """
+ return self.api(self.papi.vxlan_add_del_tunnel,
+ {'is_add': is_add,
+ 'is_ipv6': is_ipv6,
+ 'src_address': src_addr,
+ 'dst_address': dst_addr,
+ 'mcast_sw_if_index': mcast_sw_if_index,
+ 'encap_vrf_id': encap_vrf_id,
+ 'decap_next_index': decap_next_index,
+ 'vni': vni})
+
+ def bridge_domain_add_del(self, bd_id, flood=1, uu_flood=1, forward=1,
+ learn=1, arp_term=0, is_add=1):
+ """Create/delete bridge domain.
+
+ :param int bd_id: Bridge domain index.
+ :param int flood: Enable/disable bcast/mcast flooding in the BD.
+ (Default value = 1)
+ :param int uu_flood: Enable/disable unknown unicast flood in the BD.
+ (Default value = 1)
+ :param int forward: Enable/disable forwarding on all interfaces in
+ the BD. (Default value = 1)
+ :param int learn: Enable/disable learning on all interfaces in the BD.
+ (Default value = 1)
+ :param int arp_term: Enable/disable arp termination in the BD.
+            (Default value = 0)
+ :param int is_add: Add or delete flag. (Default value = 1)
+ """
+ return self.api(self.papi.bridge_domain_add_del,
+ {'bd_id': bd_id,
+ 'flood': flood,
+ 'uu_flood': uu_flood,
+ 'forward': forward,
+ 'learn': learn,
+ 'arp_term': arp_term,
+ 'is_add': is_add})
+
+ def l2fib_add_del(self, mac, bd_id, sw_if_index, is_add=1, static_mac=0,
+ filter_mac=0, bvi_mac=0):
+ """Create/delete L2 FIB entry.
+
+ :param str mac: MAC address to create FIB entry for.
+ :param int bd_id: Bridge domain index.
+ :param int sw_if_index: Software interface index of the interface.
+ :param int is_add: Add or delete flag. (Default value = 1)
+ :param int static_mac: Set to 1 to create static MAC entry.
+ (Default value = 0)
+ :param int filter_mac: Set to 1 to drop packet that's source or
+ destination MAC address contains defined MAC address.
+ (Default value = 0)
+ :param int bvi_mac: Set to 1 to create entry that points to BVI
+ interface. (Default value = 0)
+ """
+ return self.api(self.papi.l2fib_add_del,
+ {'mac': self._convert_mac(mac),
+ 'bd_id': bd_id,
+ 'sw_if_index': sw_if_index,
+ 'is_add': is_add,
+ 'static_mac': static_mac,
+ 'filter_mac': filter_mac,
+ 'bvi_mac': bvi_mac})
+
+ def sw_interface_set_l2_bridge(self, sw_if_index, bd_id,
+ shg=0, bvi=0, enable=1):
+ """Add/remove interface to/from bridge domain.
+
+ :param int sw_if_index: Software interface index of the interface.
+ :param int bd_id: Bridge domain index.
+ :param int shg: Split-horizon group index. (Default value = 0)
+ :param int bvi: Set interface as a bridge group virtual interface.
+ (Default value = 0)
+ :param int enable: Add or remove interface. (Default value = 1)
+ """
+ return self.api(self.papi.sw_interface_set_l2_bridge,
+ {'rx_sw_if_index': sw_if_index,
+ 'bd_id': bd_id,
+ 'shg': shg,
+ 'bvi': bvi,
+ 'enable': enable})
+
+ def bridge_flags(self, bd_id, is_set, feature_bitmap):
+ """Enable/disable required feature of the bridge domain with defined ID.
+
+ :param int bd_id: Bridge domain ID.
+ :param int is_set: Set to 1 to enable, set to 0 to disable the feature.
+ :param int feature_bitmap: Bitmap value of the feature to be set:
+ - learn (1 << 0),
+ - forward (1 << 1),
+ - flood (1 << 2),
+ - uu-flood (1 << 3) or
+ - arp-term (1 << 4).
+ """
+ return self.api(self.papi.bridge_flags,
+ {'bd_id': bd_id,
+ 'is_set': is_set,
+ 'feature_bitmap': feature_bitmap})
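+
+    # Example (sketch): enabling learning and forwarding in bridge domain 1
+    # would combine the bits listed above:
+    #   self.bridge_flags(bd_id=1, is_set=1,
+    #                     feature_bitmap=(1 << 0) | (1 << 1))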
+
+ def bridge_domain_dump(self, bd_id=0):
+ """
+
+ :param int bd_id: Bridge domain ID. (Default value = 0 => dump of all
+ existing bridge domains returned)
+ :return: Dictionary of bridge domain(s) data.
+ """
+ return self.api(self.papi.bridge_domain_dump,
+ {'bd_id': bd_id})
+
+ def sw_interface_set_l2_xconnect(self, rx_sw_if_index, tx_sw_if_index,
+ enable):
+ """Create or delete unidirectional cross-connect from Tx interface to
+ Rx interface.
+
+ :param int rx_sw_if_index: Software interface index of Rx interface.
+ :param int tx_sw_if_index: Software interface index of Tx interface.
+ :param int enable: Create cross-connect if equal to 1, delete
+ cross-connect if equal to 0.
+
+ """
+ return self.api(self.papi.sw_interface_set_l2_xconnect,
+ {'rx_sw_if_index': rx_sw_if_index,
+ 'tx_sw_if_index': tx_sw_if_index,
+ 'enable': enable})
+
+ def sw_interface_set_l2_tag_rewrite(
+ self,
+ sw_if_index,
+ vtr_oper,
+ push=0,
+ tag1=0,
+ tag2=0):
+ """L2 interface vlan tag rewrite configure request
+        :param sw_if_index: interface the operation is applied to
+        :param vtr_oper: choose from l2_vtr_op_t enum values
+        :param push: if set, the first pushed tag is dot1q, else dot1ad
+        :param tag1: needed for any push or translate vtr op
+        :param tag2: needed for any push-2 or translate x-2 vtr ops
+
+ """
+ return self.api(self.papi.l2_interface_vlan_tag_rewrite,
+ {'sw_if_index': sw_if_index,
+ 'vtr_op': vtr_oper,
+ 'push_dot1q': push,
+ 'tag1': tag1,
+ 'tag2': tag2})
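+
+    # Example (sketch): popping a single VLAN tag on a sub-interface:
+    #   self.sw_interface_set_l2_tag_rewrite(sub_sw_if_index,
+    #                                        L2_VTR_OP.L2_POP_1)
+    # (sub_sw_if_index is a placeholder for a real sub-interface index.)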
+
+ def sw_interface_set_flags(self, sw_if_index, admin_up_down,
+ link_up_down=0, deleted=0):
+ """
+
+ :param admin_up_down:
+ :param sw_if_index:
+ :param link_up_down: (Default value = 0)
+ :param deleted: (Default value = 0)
+
+ """
+ return self.api(self.papi.sw_interface_set_flags,
+ {'sw_if_index': sw_if_index,
+ 'admin_up_down': admin_up_down,
+ 'link_up_down': link_up_down,
+ 'deleted': deleted})
+
+ def create_subif(self, sw_if_index, sub_id, outer_vlan, inner_vlan,
+ no_tags=0, one_tag=0, two_tags=0, dot1ad=0, exact_match=0,
+ default_sub=0, outer_vlan_id_any=0, inner_vlan_id_any=0):
+ """Create subinterface
+ from vpe.api: set dot1ad = 0 for dot1q, set dot1ad = 1 for dot1ad
+
+ :param sub_id: param inner_vlan:
+ :param sw_if_index:
+ :param outer_vlan:
+ :param inner_vlan:
+ :param no_tags: (Default value = 0)
+ :param one_tag: (Default value = 0)
+ :param two_tags: (Default value = 0)
+ :param dot1ad: (Default value = 0)
+ :param exact_match: (Default value = 0)
+ :param default_sub: (Default value = 0)
+ :param outer_vlan_id_any: (Default value = 0)
+ :param inner_vlan_id_any: (Default value = 0)
+
+ """
+ return self.api(
+ self.papi.create_subif,
+ {'sw_if_index': sw_if_index,
+ 'sub_id': sub_id,
+ 'no_tags': no_tags,
+ 'one_tag': one_tag,
+ 'two_tags': two_tags,
+ 'dot1ad': dot1ad,
+ 'exact_match': exact_match,
+ 'default_sub': default_sub,
+ 'outer_vlan_id_any': outer_vlan_id_any,
+ 'inner_vlan_id_any': inner_vlan_id_any,
+ 'outer_vlan_id': outer_vlan,
+ 'inner_vlan_id': inner_vlan})
+
+ def delete_subif(self, sw_if_index):
+ """Delete subinterface
+
+ :param sw_if_index:
+ """
+ return self.api(self.papi.delete_subif,
+ {'sw_if_index': sw_if_index})
+
+ def create_vlan_subif(self, sw_if_index, vlan):
+ """
+
+ :param vlan:
+ :param sw_if_index:
+
+ """
+ return self.api(self.papi.create_vlan_subif,
+ {'sw_if_index': sw_if_index,
+ 'vlan_id': vlan})
+
+ def create_loopback(self, mac=''):
+ """
+
+ :param mac: (Optional)
+ """
+ return self.api(self.papi.create_loopback,
+ {'mac_address': mac})
+
+ def ip_add_del_route(
+ self,
+ dst_address,
+ dst_address_length,
+ next_hop_address,
+ next_hop_sw_if_index=0xFFFFFFFF,
+ table_id=0,
+ next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels=0,
+ next_hop_out_label_stack=[],
+ next_hop_via_label=MPLS_LABEL_INVALID,
+ create_vrf_if_needed=0,
+ is_resolve_host=0,
+ is_resolve_attached=0,
+ classify_table_index=0xFFFFFFFF,
+ is_add=1,
+ is_drop=0,
+ is_unreach=0,
+ is_prohibit=0,
+ is_ipv6=0,
+ is_local=0,
+ is_classify=0,
+ is_multipath=0,
+ not_last=0):
+ """
+
+        :param dst_address:
+        :param dst_address_length:
+        :param next_hop_address:
+        :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+        :param table_id: (Default value = 0)
+        :param next_hop_table_id: (Default value = 0)
+ :param classify_table_index: (Default value = 0xFFFFFFFF)
+ :param create_vrf_if_needed: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param is_drop: (Default value = 0)
+ :param is_ipv6: (Default value = 0)
+ :param is_local: (Default value = 0)
+ :param is_classify: (Default value = 0)
+ :param is_multipath: (Default value = 0)
+ :param is_resolve_host: (Default value = 0)
+ :param is_resolve_attached: (Default value = 0)
+ :param not_last: (Default value = 0)
+ :param next_hop_weight: (Default value = 1)
+
+ """
+
+ return self.api(
+ self.papi.ip_add_del_route,
+ {'next_hop_sw_if_index': next_hop_sw_if_index,
+ 'table_id': table_id,
+ 'classify_table_index': classify_table_index,
+ 'next_hop_table_id': next_hop_table_id,
+ 'create_vrf_if_needed': create_vrf_if_needed,
+ 'is_add': is_add,
+ 'is_drop': is_drop,
+ 'is_unreach': is_unreach,
+ 'is_prohibit': is_prohibit,
+ 'is_ipv6': is_ipv6,
+ 'is_local': is_local,
+ 'is_classify': is_classify,
+ 'is_multipath': is_multipath,
+ 'is_resolve_host': is_resolve_host,
+ 'is_resolve_attached': is_resolve_attached,
+ 'not_last': not_last,
+ 'next_hop_weight': next_hop_weight,
+ 'dst_address_length': dst_address_length,
+ 'dst_address': dst_address,
+ 'next_hop_address': next_hop_address,
+ 'next_hop_n_out_labels': next_hop_n_out_labels,
+ 'next_hop_via_label': next_hop_via_label,
+ 'next_hop_out_label_stack': next_hop_out_label_stack})
+
+ def ip_fib_dump(self):
+ return self.api(self.papi.ip_fib_dump, {})
+
+ def ip_neighbor_add_del(self,
+ sw_if_index,
+ mac_address,
+ dst_address,
+ vrf_id=0,
+ is_add=1,
+ is_ipv6=0,
+ is_static=0,
+ ):
+ """ Add neighbor MAC to IPv4 or IPv6 address.
+
+ :param sw_if_index:
+ :param mac_address:
+ :param dst_address:
+ :param vrf_id: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param is_ipv6: (Default value = 0)
+ :param is_static: (Default value = 0)
+ """
+
+ return self.api(
+ self.papi.ip_neighbor_add_del,
+ {'vrf_id': vrf_id,
+ 'sw_if_index': sw_if_index,
+ 'is_add': is_add,
+ 'is_ipv6': is_ipv6,
+ 'is_static': is_static,
+ 'mac_address': mac_address,
+ 'dst_address': dst_address
+ }
+ )
+
+ def sw_interface_span_enable_disable(
+ self, sw_if_index_from, sw_if_index_to, state=1):
+ """
+
+ :param sw_if_index_from:
+ :param sw_if_index_to:
+ :param state:
+ """
+ return self.api(self.papi.sw_interface_span_enable_disable,
+ {'sw_if_index_from': sw_if_index_from,
+ 'sw_if_index_to': sw_if_index_to,
+ 'state': state})
+
+ def gre_tunnel_add_del(self,
+ src_address,
+ dst_address,
+ outer_fib_id=0,
+ is_teb=0,
+ is_add=1,
+ is_ip6=0):
+ """ Add a GRE tunnel
+
+ :param src_address:
+ :param dst_address:
+ :param outer_fib_id: (Default value = 0)
+ :param is_add: (Default value = 1)
+ :param is_ipv6: (Default value = 0)
+ :param is_teb: (Default value = 0)
+ """
+
+ return self.api(
+ self.papi.gre_add_del_tunnel,
+ {'is_add': is_add,
+ 'is_ipv6': is_ip6,
+ 'teb': is_teb,
+ 'src_address': src_address,
+ 'dst_address': dst_address,
+ 'outer_fib_id': outer_fib_id}
+ )
+
+ def mpls_route_add_del(
+ self,
+ label,
+ eos,
+ next_hop_proto_is_ip4,
+ next_hop_address,
+ next_hop_sw_if_index=0xFFFFFFFF,
+ table_id=0,
+ next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels=0,
+ next_hop_out_label_stack=[],
+ next_hop_via_label=MPLS_LABEL_INVALID,
+ create_vrf_if_needed=0,
+ is_resolve_host=0,
+ is_resolve_attached=0,
+ is_add=1,
+ is_drop=0,
+ is_multipath=0,
+ classify_table_index=0xFFFFFFFF,
+ is_classify=0,
+ not_last=0):
+        """ Add/delete an MPLS route
+
+        :param label: local MPLS label
+        :param eos: end-of-stack bit
+        :param next_hop_proto_is_ip4: 1 if the next hop is IPv4, 0 if IPv6
+        :param next_hop_address: next-hop address (binary form)
+        :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+        :param table_id: (Default value = 0)
+        :param next_hop_table_id: (Default value = 0)
+        :param next_hop_weight: (Default value = 1)
+        :param next_hop_n_out_labels: (Default value = 0)
+        :param next_hop_out_label_stack: (Default value = [])
+        :param next_hop_via_label: (Default value = MPLS_LABEL_INVALID)
+        :param create_vrf_if_needed: (Default value = 0)
+        :param is_resolve_host: (Default value = 0)
+        :param is_resolve_attached: (Default value = 0)
+        :param is_add: (Default value = 1)
+        :param is_drop: (Default value = 0)
+        :param is_multipath: (Default value = 0)
+        :param classify_table_index: (Default value = 0xFFFFFFFF)
+        :param is_classify: (Default value = 0)
+        :param not_last: (Default value = 0)
+        """
+
+ return self.api(
+ self.papi.mpls_route_add_del,
+ {'mr_label': label,
+ 'mr_eos': eos,
+ 'mr_table_id': table_id,
+ 'mr_classify_table_index': classify_table_index,
+ 'mr_create_table_if_needed': create_vrf_if_needed,
+ 'mr_is_add': is_add,
+ 'mr_is_classify': is_classify,
+ 'mr_is_multipath': is_multipath,
+ 'mr_is_resolve_host': is_resolve_host,
+ 'mr_is_resolve_attached': is_resolve_attached,
+ 'mr_next_hop_proto_is_ip4': next_hop_proto_is_ip4,
+ 'mr_next_hop_weight': next_hop_weight,
+ 'mr_next_hop': next_hop_address,
+ 'mr_next_hop_n_out_labels': next_hop_n_out_labels,
+ 'mr_next_hop_sw_if_index': next_hop_sw_if_index,
+ 'mr_next_hop_table_id': next_hop_table_id,
+ 'mr_next_hop_via_label': next_hop_via_label,
+ 'mr_next_hop_out_label_stack': next_hop_out_label_stack})
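+
+    # Usage sketch (hypothetical label and next hop): install local label 33,
+    # end-of-stack, routed towards an IPv4 next hop reachable via pg0:
+    #
+    #     from socket import inet_pton, AF_INET
+    #     self.vapi.mpls_route_add_del(
+    #         33, 1, 1, inet_pton(AF_INET, '10.0.0.1'),
+    #         next_hop_sw_if_index=self.pg0.sw_if_index)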
+
+ def mpls_ip_bind_unbind(
+ self,
+ label,
+ dst_address,
+ dst_address_length,
+ table_id=0,
+ ip_table_id=0,
+ is_ip4=1,
+ create_vrf_if_needed=0,
+ is_bind=1):
+        """ Bind/unbind an MPLS local label to/from an IP prefix
+
+        :param label: local MPLS label
+        :param dst_address: IP prefix (binary form)
+        :param dst_address_length: prefix length
+        :param table_id: MPLS table id (Default value = 0)
+        :param ip_table_id: IP table id (Default value = 0)
+        :param is_ip4: (Default value = 1)
+        :param create_vrf_if_needed: (Default value = 0)
+        :param is_bind: 1 to bind, 0 to unbind (Default value = 1)
+        """
+ return self.api(
+ self.papi.mpls_ip_bind_unbind,
+ {'mb_mpls_table_id': table_id,
+ 'mb_label': label,
+ 'mb_ip_table_id': ip_table_id,
+ 'mb_create_table_if_needed': create_vrf_if_needed,
+ 'mb_is_bind': is_bind,
+ 'mb_is_ip4': is_ip4,
+ 'mb_address_length': dst_address_length,
+ 'mb_address': dst_address})
+
+ def mpls_tunnel_add_del(
+ self,
+ tun_sw_if_index,
+ next_hop_proto_is_ip4,
+ next_hop_address,
+ next_hop_sw_if_index=0xFFFFFFFF,
+ next_hop_table_id=0,
+ next_hop_weight=1,
+ next_hop_n_out_labels=0,
+ next_hop_out_label_stack=[],
+ next_hop_via_label=MPLS_LABEL_INVALID,
+ create_vrf_if_needed=0,
+ is_add=1,
+ l2_only=0):
+        """ Add/delete an MPLS tunnel
+
+        :param tun_sw_if_index: sw_if_index of the tunnel
+        :param next_hop_proto_is_ip4: 1 if the next hop is IPv4, 0 if IPv6
+        :param next_hop_address: next-hop address (binary form)
+        :param next_hop_sw_if_index: (Default value = 0xFFFFFFFF)
+        :param next_hop_table_id: (Default value = 0)
+        :param next_hop_weight: (Default value = 1)
+        :param next_hop_n_out_labels: (Default value = 0)
+        :param next_hop_out_label_stack: (Default value = [])
+        :param next_hop_via_label: (Default value = MPLS_LABEL_INVALID)
+        :param create_vrf_if_needed: (Default value = 0)
+        :param is_add: (Default value = 1)
+        :param l2_only: (Default value = 0)
+        """
+ return self.api(
+ self.papi.mpls_tunnel_add_del,
+ {'mt_sw_if_index': tun_sw_if_index,
+ 'mt_is_add': is_add,
+ 'mt_l2_only': l2_only,
+ 'mt_next_hop_proto_is_ip4': next_hop_proto_is_ip4,
+ 'mt_next_hop_weight': next_hop_weight,
+ 'mt_next_hop': next_hop_address,
+ 'mt_next_hop_n_out_labels': next_hop_n_out_labels,
+ 'mt_next_hop_sw_if_index': next_hop_sw_if_index,
+ 'mt_next_hop_table_id': next_hop_table_id,
+ 'mt_next_hop_out_label_stack': next_hop_out_label_stack})
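+
+    # Usage sketch (hypothetical values; assumes passing 0xFFFFFFFF as
+    # tun_sw_if_index requests creation of a new tunnel): push label 33
+    # towards a next hop on pg0 and keep the resulting interface index:
+    #
+    #     from socket import inet_pton, AF_INET
+    #     r = self.vapi.mpls_tunnel_add_del(
+    #         0xFFFFFFFF, 1, inet_pton(AF_INET, '10.0.0.1'),
+    #         next_hop_sw_if_index=self.pg0.sw_if_index,
+    #         next_hop_n_out_labels=1, next_hop_out_label_stack=[33])
+    #     tunnel_sw_if_index = r.sw_if_index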
+
+ def snat_interface_add_del_feature(
+ self,
+ sw_if_index,
+ is_inside=1,
+ is_add=1):
+ """Enable/disable S-NAT feature on the interface
+
+ :param sw_if_index: Software index of the interface
+ :param is_inside: 1 if inside, 0 if outside (Default value = 1)
+ :param is_add: 1 if add, 0 if delete (Default value = 1)
+ """
+ return self.api(
+ self.papi.snat_interface_add_del_feature,
+ {'is_add': is_add,
+ 'is_inside': is_inside,
+ 'sw_if_index': sw_if_index})
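+
+    # Usage sketch: mark pg0 as the S-NAT inside interface and pg1 as the
+    # outside interface (assumes configured pg interfaces on the test case):
+    #
+    #     self.vapi.snat_interface_add_del_feature(self.pg0.sw_if_index)
+    #     self.vapi.snat_interface_add_del_feature(self.pg1.sw_if_index,
+    #                                              is_inside=0)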
+
+ def snat_add_static_mapping(
+ self,
+ local_ip,
+ external_ip,
+ local_port=0,
+ external_port=0,
+ addr_only=1,
+ vrf_id=0,
+ is_add=1,
+ is_ip4=1):
+ """Add/delete S-NAT static mapping
+
+ :param local_ip: Local IP address
+ :param external_ip: External IP address
+ :param local_port: Local port number (Default value = 0)
+ :param external_port: External port number (Default value = 0)
+        :param addr_only: 1 if address-only mapping, 0 if address and port
+                          (Default value = 1)
+        :param vrf_id: VRF ID (Default value = 0)
+ :param is_add: 1 if add, 0 if delete (Default value = 1)
+ :param is_ip4: 1 if address type is IPv4 (Default value = 1)
+ """
+ return self.api(
+ self.papi.snat_add_static_mapping,
+ {'is_add': is_add,
+ 'is_ip4': is_ip4,
+ 'addr_only': addr_only,
+ 'local_ip_address': local_ip,
+ 'external_ip_address': external_ip,
+ 'local_port': local_port,
+ 'external_port': external_port,
+ 'vrf_id': vrf_id})
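+
+    # Usage sketch (hypothetical addresses, binary form): add an address-only
+    # mapping, relying on the addr_only=1 default:
+    #
+    #     from socket import inet_pton, AF_INET
+    #     self.vapi.snat_add_static_mapping(
+    #         inet_pton(AF_INET, '192.168.1.10'),   # local address
+    #         inet_pton(AF_INET, '10.69.10.10'))    # external address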
+
+ def snat_add_address_range(
+ self,
+ first_ip_address,
+ last_ip_address,
+ is_add=1,
+ is_ip4=1):
+ """Add/del S-NAT address range
+
+ :param first_ip_address: First IP address
+ :param last_ip_address: Last IP address
+ :param is_add: 1 if add, 0 if delete (Default value = 1)
+ :param is_ip4: 1 if address type is IPv4 (Default value = 1)
+ """
+ return self.api(
+ self.papi.snat_add_address_range,
+ {'is_ip4': is_ip4,
+ 'first_ip_address': first_ip_address,
+ 'last_ip_address': last_ip_address,
+ 'is_add': is_add})
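+
+    # Usage sketch (hypothetical pool): a single-address pool is added by
+    # passing the same address as both first and last:
+    #
+    #     from socket import inet_pton, AF_INET
+    #     addr = inet_pton(AF_INET, '10.69.10.3')
+    #     self.vapi.snat_add_address_range(addr, addr)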
+
+ def snat_address_dump(self):
+ """Dump S-NAT addresses
+        :return: list of S-NAT addresses
+ """
+ return self.api(self.papi.snat_address_dump, {})
+
+ def snat_interface_dump(self):
+ """Dump interfaces with S-NAT feature
+        :return: list of interfaces with the S-NAT feature
+ """
+ return self.api(self.papi.snat_interface_dump, {})
+
+ def snat_static_mapping_dump(self):
+ """Dump S-NAT static mappings
+        :return: list of S-NAT static mappings
+ """
+ return self.api(self.papi.snat_static_mapping_dump, {})
+
+ def snat_show_config(self):
+ """Show S-NAT config
+ :return: S-NAT config parameters
+ """
+ return self.api(self.papi.snat_show_config, {})
+
+    def control_ping(self):
+        """ Send a control ping and wait for the reply """
+        return self.api(self.papi.control_ping, {})
+
+    def bfd_udp_add(self, sw_if_index, desired_min_tx, required_min_rx,
+                    detect_mult, local_addr, peer_addr, is_ipv6=0):
+        """ Add a BFD-over-UDP session
+
+        :param sw_if_index: interface to run the session on
+        :param desired_min_tx: desired minimum tx interval
+        :param required_min_rx: required minimum rx interval
+        :param detect_mult: detection multiplier
+        :param local_addr: local address (binary form)
+        :param peer_addr: peer address (binary form)
+        :param is_ipv6: (Default value = 0)
+        """
+ return self.api(self.papi.bfd_udp_add,
+ {
+ 'sw_if_index': sw_if_index,
+ 'desired_min_tx': desired_min_tx,
+ 'required_min_rx': required_min_rx,
+ 'local_addr': local_addr,
+ 'peer_addr': peer_addr,
+ 'is_ipv6': is_ipv6,
+ 'detect_mult': detect_mult,
+ })
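+
+    # Usage sketch (hypothetical timing values; addresses in binary form):
+    #
+    #     from socket import inet_pton, AF_INET
+    #     self.vapi.bfd_udp_add(self.pg0.sw_if_index,
+    #                           desired_min_tx=100000,
+    #                           required_min_rx=100000,
+    #                           detect_mult=3,
+    #                           local_addr=inet_pton(AF_INET, '10.0.0.1'),
+    #                           peer_addr=inet_pton(AF_INET, '10.0.0.2'))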
+
+    def bfd_udp_del(self, sw_if_index, local_addr, peer_addr, is_ipv6=0):
+        """ Delete a BFD-over-UDP session
+
+        :param sw_if_index: interface the session runs on
+        :param local_addr: local address (binary form)
+        :param peer_addr: peer address (binary form)
+        :param is_ipv6: (Default value = 0)
+        """
+ return self.api(self.papi.bfd_udp_del,
+ {
+ 'sw_if_index': sw_if_index,
+ 'local_addr': local_addr,
+ 'peer_addr': peer_addr,
+ 'is_ipv6': is_ipv6,
+ })
+
+ def bfd_udp_session_dump(self):
+ return self.api(self.papi.bfd_udp_session_dump, {})
+
+ def bfd_session_set_flags(self, bs_idx, admin_up_down):
+ return self.api(self.papi.bfd_session_set_flags, {
+ 'bs_index': bs_idx,
+ 'admin_up_down': admin_up_down,
+ })
+
+ def want_bfd_events(self, enable_disable=1):
+ return self.api(self.papi.want_bfd_events, {
+ 'enable_disable': enable_disable,
+ 'pid': os.getpid(),
+ })
+
+ def classify_add_del_table(
+ self,
+ is_add,
+ mask,
+ match_n_vectors=1,
+ table_index=0xFFFFFFFF,
+ nbuckets=2,
+ memory_size=2097152,
+ skip_n_vectors=0,
+ next_table_index=0xFFFFFFFF,
+ miss_next_index=0xFFFFFFFF,
+ current_data_flag=0,
+ current_data_offset=0):
+        """ Add/delete a classify table
+
+        :param is_add: 1 to add, 0 to delete
+        :param mask: match mask
+        :param match_n_vectors: (Default value = 1)
+        :param table_index: (Default value = 0xFFFFFFFF)
+ :param nbuckets: (Default value = 2)
+ :param memory_size: (Default value = 2097152)
+ :param skip_n_vectors: (Default value = 0)
+ :param next_table_index: (Default value = 0xFFFFFFFF)
+ :param miss_next_index: (Default value = 0xFFFFFFFF)
+ :param current_data_flag: (Default value = 0)
+ :param current_data_offset: (Default value = 0)
+ """
+
+ return self.api(
+ self.papi.classify_add_del_table,
+            {'is_add': is_add,
+             'table_index': table_index,
+             'nbuckets': nbuckets,
+             'memory_size': memory_size,
+             'skip_n_vectors': skip_n_vectors,
+             'match_n_vectors': match_n_vectors,
+             'next_table_index': next_table_index,
+             'miss_next_index': miss_next_index,
+             'current_data_flag': current_data_flag,
+             'current_data_offset': current_data_offset,
+             'mask': mask})
+
+ def classify_add_del_session(
+ self,
+ is_add,
+ table_index,
+ match,
+ opaque_index=0xFFFFFFFF,
+ hit_next_index=0xFFFFFFFF,
+ advance=0,
+ action=0,
+ metadata=0):
+        """ Add/delete a classify session
+
+        :param is_add: 1 to add, 0 to delete
+        :param table_index: index of the table to add the session to
+        :param match: match value
+ :param opaque_index: (Default value = 0xFFFFFFFF)
+ :param hit_next_index: (Default value = 0xFFFFFFFF)
+ :param advance: (Default value = 0)
+ :param action: (Default value = 0)
+ :param metadata: (Default value = 0)
+ """
+
+ return self.api(
+ self.papi.classify_add_del_session,
+            {'is_add': is_add,
+             'table_index': table_index,
+             'hit_next_index': hit_next_index,
+             'opaque_index': opaque_index,
+             'advance': advance,
+             'action': action,
+             'metadata': metadata,
+             'match': match})
+
+ def input_acl_set_interface(
+ self,
+ is_add,
+ sw_if_index,
+ ip4_table_index=0xFFFFFFFF,
+ ip6_table_index=0xFFFFFFFF,
+ l2_table_index=0xFFFFFFFF):
+        """ Set input ACL (classify) tables on an interface
+
+        :param is_add: 1 to apply the tables, 0 to remove them
+        :param sw_if_index: interface sw_if_index
+ :param ip4_table_index: (Default value = 0xFFFFFFFF)
+ :param ip6_table_index: (Default value = 0xFFFFFFFF)
+ :param l2_table_index: (Default value = 0xFFFFFFFF)
+ """
+
+ return self.api(
+ self.papi.input_acl_set_interface,
+            {'sw_if_index': sw_if_index,
+             'ip4_table_index': ip4_table_index,
+             'ip6_table_index': ip6_table_index,
+             'l2_table_index': l2_table_index,
+             'is_add': is_add})
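+
+    # Usage sketch tying the three classify calls together (mask and match
+    # are hypothetical placeholders; real tests build them from packet field
+    # offsets, and the table index comes back in the reply's new_table_index):
+    #
+    #     t = self.vapi.classify_add_del_table(1, mask)
+    #     self.vapi.classify_add_del_session(1, t.new_table_index, match)
+    #     self.vapi.input_acl_set_interface(1, self.pg0.sw_if_index,
+    #                                       ip4_table_index=t.new_table_index)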
diff --git a/vpp/test/vpp_pg_interface.py b/vpp/test/vpp_pg_interface.py
new file mode 100644
index 00000000..634d7d3e
--- /dev/null
+++ b/vpp/test/vpp_pg_interface.py
@@ -0,0 +1,326 @@
+import os
+import time
+from scapy.utils import wrpcap, rdpcap, PcapReader
+from vpp_interface import VppInterface
+
+from scapy.layers.l2 import Ether, ARP
+from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6ND_NA,\
+ ICMPv6NDOptSrcLLAddr, ICMPv6NDOptDstLLAddr, ICMPv6ND_RA, RouterAlert, \
+ IPv6ExtHdrHopByHop
+from util import ppp, ppc
+
+
+def is_ipv6_misc(p):
+    """ Is the packet an uninteresting IPv6 multicast (RA / router alert)? """
+ if p.haslayer(ICMPv6ND_RA):
+ return True
+ if p.haslayer(IPv6ExtHdrHopByHop):
+ for o in p[IPv6ExtHdrHopByHop].options:
+ if isinstance(o, RouterAlert):
+ return True
+ return False
+
+
+class VppPGInterface(VppInterface):
+ """
+ VPP packet-generator interface
+ """
+
+ @property
+ def pg_index(self):
+ """packet-generator interface index assigned by VPP"""
+ return self._pg_index
+
+ @property
+ def out_path(self):
+ """pcap file path - captured packets"""
+ return self._out_path
+
+ @property
+ def in_path(self):
+ """ pcap file path - injected packets"""
+ return self._in_path
+
+ @property
+ def capture_cli(self):
+ """CLI string to start capture on this interface"""
+ return self._capture_cli
+
+ @property
+ def cap_name(self):
+ """capture name for this interface"""
+ return self._cap_name
+
+ @property
+ def input_cli(self):
+ """CLI string to load the injected packets"""
+ return self._input_cli
+
+ @property
+ def in_history_counter(self):
+ """Self-incrementing counter used when renaming old pcap files"""
+ v = self._in_history_counter
+ self._in_history_counter += 1
+ return v
+
+ @property
+ def out_history_counter(self):
+ """Self-incrementing counter used when renaming old pcap files"""
+ v = self._out_history_counter
+ self._out_history_counter += 1
+ return v
+
+ def __init__(self, test, pg_index):
+ """ Create VPP packet-generator interface """
+ r = test.vapi.pg_create_interface(pg_index)
+ self._sw_if_index = r.sw_if_index
+
+ super(VppPGInterface, self).__init__(test)
+
+ self._in_history_counter = 0
+ self._out_history_counter = 0
+ self._pg_index = pg_index
+ self._out_file = "pg%u_out.pcap" % self.pg_index
+ self._out_path = self.test.tempdir + "/" + self._out_file
+ self._in_file = "pg%u_in.pcap" % self.pg_index
+ self._in_path = self.test.tempdir + "/" + self._in_file
+ self._capture_cli = "packet-generator capture pg%u pcap %s" % (
+ self.pg_index, self.out_path)
+ self._cap_name = "pcap%u" % self.sw_if_index
+ self._input_cli = "packet-generator new pcap %s source pg%u name %s" % (
+ self.in_path, self.pg_index, self.cap_name)
+
+ def enable_capture(self):
+ """ Enable capture on this packet-generator interface"""
+ try:
+ if os.path.isfile(self.out_path):
+ os.rename(self.out_path,
+ "%s/history.[timestamp:%f].[%s-counter:%04d].%s" %
+ (self.test.tempdir,
+ time.time(),
+ self.name,
+ self.out_history_counter,
+ self._out_file))
+ except:
+ pass
+ # FIXME this should be an API, but no such exists atm
+ self.test.vapi.cli(self.capture_cli)
+ self._pcap_reader = None
+
+ def add_stream(self, pkts):
+ """
+ Add a stream of packets to this packet-generator
+
+ :param pkts: iterable packets
+
+ """
+ try:
+ if os.path.isfile(self.in_path):
+ os.rename(self.in_path,
+ "%s/history.[timestamp:%f].[%s-counter:%04d].%s" %
+ (self.test.tempdir,
+ time.time(),
+ self.name,
+ self.in_history_counter,
+ self._in_file))
+ except:
+ pass
+ wrpcap(self.in_path, pkts)
+ self.test.register_capture(self.cap_name)
+ # FIXME this should be an API, but no such exists atm
+ self.test.vapi.cli(self.input_cli)
+
+ def get_capture(self, remark=None, filter_fn=is_ipv6_misc):
+ """
+ Get captured packets
+
+ :param remark: remark printed into debug logs
+ :param filter_fn: filter applied to each packet, packets for which
+ the filter returns True are removed from capture
+ :returns: iterable packets
+ """
+ try:
+ self.wait_for_capture_file()
+ output = rdpcap(self.out_path)
+ self.test.logger.debug("Capture has %s packets" % len(output.res))
+ except IOError: # TODO
+ self.test.logger.debug("File %s does not exist, probably because no"
+ " packets arrived" % self.out_path)
+ if remark:
+ raise Exception("No packets captured on %s(%s)" %
+ (self.name, remark))
+ else:
+ raise Exception("No packets captured on %s" % self.name)
+ before = len(output.res)
+ if filter_fn:
+ output.res = [p for p in output.res if not filter_fn(p)]
+        removed = before - len(output.res)
+ if removed:
+ self.test.logger.debug(
+ "Filtered out %s packets from capture (returning %s)" %
+ (removed, len(output.res)))
+ return output
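+
+    # Typical send/receive flow built on these helpers (sketch; pg0/pg1 are
+    # pg interfaces of the test case and pkts is an iterable of scapy
+    # packets):
+    #
+    #     self.pg0.add_stream(pkts)      # write packets into the input pcap
+    #     self.pg1.enable_capture()      # arm capture on the receive side
+    #     self.pg_start()                # run the packet generator
+    #     rx = self.pg1.get_capture()    # read back what VPP forwarded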
+
+ def assert_nothing_captured(self, remark=None):
+ if os.path.isfile(self.out_path):
+ try:
+ capture = self.get_capture(remark=remark)
+ self.test.logger.error(
+ ppc("Unexpected packets captured:", capture))
+ except:
+ pass
+ if remark:
+ raise AssertionError(
+ "Capture file present for interface %s(%s)" %
+ (self.name, remark))
+ else:
+ raise AssertionError("Capture file present for interface %s" %
+ self.name)
+
+ def wait_for_capture_file(self, timeout=1):
+ """
+ Wait until pcap capture file appears
+
+ :param timeout: How long to wait for the packet (default 1s)
+
+ :raises Exception: if the capture file does not appear within timeout
+ """
+ limit = time.time() + timeout
+ if not os.path.isfile(self.out_path):
+ self.test.logger.debug(
+ "Waiting for capture file to appear, timeout is %ss", timeout)
+ else:
+ self.test.logger.debug("Capture file already exists")
+ return
+ while time.time() < limit:
+ if os.path.isfile(self.out_path):
+ break
+ time.sleep(0) # yield
+ if os.path.isfile(self.out_path):
+ self.test.logger.debug("Capture file appeared after %fs" %
+ (time.time() - (limit - timeout)))
+ else:
+            self.test.logger.debug("Timeout - capture file did not appear")
+ raise Exception("Capture file did not appear within timeout")
+
+ def wait_for_packet(self, timeout):
+ """
+ Wait for next packet captured with a timeout
+
+ :param timeout: How long to wait for the packet
+
+        :returns: Captured packet if a packet arrived within timeout
+ :raises Exception: if no packet arrives within timeout
+ """
+ limit = time.time() + timeout
+ if self._pcap_reader is None:
+ self.wait_for_capture_file(timeout)
+ self._pcap_reader = PcapReader(self.out_path)
+
+ self.test.logger.debug("Waiting for packet")
+ while time.time() < limit:
+ p = self._pcap_reader.recv()
+ if p is not None:
+ self.test.logger.debug("Packet received after %fs",
+ (time.time() - (limit - timeout)))
+ return p
+ time.sleep(0) # yield
+ self.test.logger.debug("Timeout - no packets received")
+ raise Exception("Packet didn't arrive within timeout")
+
+ def create_arp_req(self):
+ """Create ARP request applicable for this interface"""
+ return (Ether(dst="ff:ff:ff:ff:ff:ff", src=self.remote_mac) /
+ ARP(op=ARP.who_has, pdst=self.local_ip4,
+ psrc=self.remote_ip4, hwsrc=self.remote_mac))
+
+ def create_ndp_req(self):
+ """Create NDP - NS applicable for this interface"""
+ return (Ether(dst="ff:ff:ff:ff:ff:ff", src=self.remote_mac) /
+ IPv6(src=self.remote_ip6, dst=self.local_ip6) /
+ ICMPv6ND_NS(tgt=self.local_ip6) /
+ ICMPv6NDOptSrcLLAddr(lladdr=self.remote_mac))
+
+ def resolve_arp(self, pg_interface=None):
+ """Resolve ARP using provided packet-generator interface
+
+ :param pg_interface: interface used to resolve, if None then this
+ interface is used
+
+ """
+ if pg_interface is None:
+ pg_interface = self
+ self.test.logger.info("Sending ARP request for %s on port %s" %
+ (self.local_ip4, pg_interface.name))
+ arp_req = self.create_arp_req()
+ pg_interface.add_stream(arp_req)
+ pg_interface.enable_capture()
+ self.test.pg_start()
+ self.test.logger.info(self.test.vapi.cli("show trace"))
+ try:
+ arp_reply = pg_interface.get_capture(filter_fn=None)
+ except:
+ self.test.logger.info("No ARP received on port %s" %
+ pg_interface.name)
+ return
+ arp_reply = arp_reply[0]
+ # Make Dot1AD packet content recognizable to scapy
+ if arp_reply.type == 0x88a8:
+ arp_reply.type = 0x8100
+ arp_reply = Ether(str(arp_reply))
+ try:
+ if arp_reply[ARP].op == ARP.is_at:
+ self.test.logger.info("VPP %s MAC address is %s " %
+ (self.name, arp_reply[ARP].hwsrc))
+ self._local_mac = arp_reply[ARP].hwsrc
+ else:
+ self.test.logger.info(
+ "No ARP received on port %s" %
+ pg_interface.name)
+ except:
+ self.test.logger.error(
+ ppp("Unexpected response to ARP request:", arp_reply))
+ raise
+
+ def resolve_ndp(self, pg_interface=None):
+ """Resolve NDP using provided packet-generator interface
+
+ :param pg_interface: interface used to resolve, if None then this
+ interface is used
+
+ """
+ if pg_interface is None:
+ pg_interface = self
+ self.test.logger.info("Sending NDP request for %s on port %s" %
+ (self.local_ip6, pg_interface.name))
+ ndp_req = self.create_ndp_req()
+ pg_interface.add_stream(ndp_req)
+ pg_interface.enable_capture()
+ self.test.pg_start()
+ self.test.logger.info(self.test.vapi.cli("show trace"))
+ replies = pg_interface.get_capture(filter_fn=None)
+ if replies is None or len(replies) == 0:
+ self.test.logger.info(
+ "No NDP received on port %s" %
+ pg_interface.name)
+ return
+ # Enabling IPv6 on an interface can generate more than the
+ # ND reply we are looking for (namely MLD). So loop through
+        # the replies to look for what we want.
+ for ndp_reply in replies:
+ # Make Dot1AD packet content recognizable to scapy
+ if ndp_reply.type == 0x88a8:
+ ndp_reply.type = 0x8100
+ ndp_reply = Ether(str(ndp_reply))
+ try:
+ ndp_na = ndp_reply[ICMPv6ND_NA]
+ opt = ndp_na[ICMPv6NDOptDstLLAddr]
+ self.test.logger.info("VPP %s MAC address is %s " %
+ (self.name, opt.lladdr))
+ self._local_mac = opt.lladdr
+ except:
+ self.test.logger.info(
+ ppp("Unexpected response to NDP request:", ndp_reply))
+ # if no packets above provided the local MAC, then this failed.
+ if not hasattr(self, '_local_mac'):
+ raise
diff --git a/vpp/test/vpp_sub_interface.py b/vpp/test/vpp_sub_interface.py
new file mode 100644
index 00000000..b4c415ca
--- /dev/null
+++ b/vpp/test/vpp_sub_interface.py
@@ -0,0 +1,140 @@
+from scapy.layers.l2 import Ether, Dot1Q
+from abc import abstractmethod, ABCMeta
+from vpp_interface import VppInterface
+from vpp_pg_interface import VppPGInterface
+
+
+class VppSubInterface(VppPGInterface):
+ __metaclass__ = ABCMeta
+
+ @property
+ def parent(self):
+ """Parent interface for this sub-interface"""
+ return self._parent
+
+ @property
+ def sub_id(self):
+ """Sub-interface ID"""
+ return self._sub_id
+
+ def __init__(self, test, parent, sub_id):
+ VppInterface.__init__(self, test)
+ self._parent = parent
+ self._parent.add_sub_if(self)
+ self._sub_id = sub_id
+
+ @abstractmethod
+ def create_arp_req(self):
+ pass
+
+ @abstractmethod
+ def create_ndp_req(self):
+ pass
+
+ def resolve_arp(self):
+ super(VppSubInterface, self).resolve_arp(self.parent)
+
+ def resolve_ndp(self):
+ super(VppSubInterface, self).resolve_ndp(self.parent)
+
+ @abstractmethod
+ def add_dot1_layer(self, pkt):
+ pass
+
+ def remove_vpp_config(self):
+ self.test.vapi.delete_subif(self._sw_if_index)
+
+
+class VppDot1QSubint(VppSubInterface):
+
+ @property
+ def vlan(self):
+ """VLAN tag"""
+ return self._vlan
+
+ def __init__(self, test, parent, sub_id, vlan=None):
+ if vlan is None:
+ vlan = sub_id
+ self._vlan = vlan
+ r = test.vapi.create_vlan_subif(parent.sw_if_index, vlan)
+ self._sw_if_index = r.sw_if_index
+ super(VppDot1QSubint, self).__init__(test, parent, sub_id)
+
+ def create_arp_req(self):
+ packet = VppPGInterface.create_arp_req(self)
+ return self.add_dot1_layer(packet)
+
+ def create_ndp_req(self):
+ packet = VppPGInterface.create_ndp_req(self)
+ return self.add_dot1_layer(packet)
+
+ def add_dot1_layer(self, packet):
+ payload = packet.payload
+ packet.remove_payload()
+ packet.add_payload(Dot1Q(vlan=self.sub_id) / payload)
+ return packet
+
+ def remove_dot1_layer(self, packet):
+ payload = packet.payload
+ self.test.instance().assertEqual(type(payload), Dot1Q)
+ self.test.instance().assertEqual(payload.vlan, self.vlan)
+ payload = payload.payload
+ packet.remove_payload()
+ packet.add_payload(payload)
+ return packet
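+
+    # Usage sketch: create a dot1q sub-interface on top of pg0 and use it to
+    # tag/untag packets (vlan defaults to sub_id; `reply` stands for a
+    # hypothetical received Ether frame):
+    #
+    #     sub_if = VppDot1QSubint(self, self.pg0, 100)
+    #     arp = sub_if.create_arp_req()            # already Dot1Q-tagged
+    #     inner = sub_if.remove_dot1_layer(reply)  # verify and strip the tag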
+
+
+class VppDot1ADSubint(VppSubInterface):
+
+ @property
+ def outer_vlan(self):
+ """Outer VLAN tag"""
+ return self._outer_vlan
+
+ @property
+ def inner_vlan(self):
+ """Inner VLAN tag"""
+ return self._inner_vlan
+
+ def __init__(self, test, parent, sub_id, outer_vlan, inner_vlan):
+ r = test.vapi.create_subif(parent.sw_if_index, sub_id, outer_vlan,
+ inner_vlan, dot1ad=1, two_tags=1,
+ exact_match=1)
+ self._sw_if_index = r.sw_if_index
+ super(VppDot1ADSubint, self).__init__(test, parent, sub_id)
+ self.DOT1AD_TYPE = 0x88A8
+ self.DOT1Q_TYPE = 0x8100
+ self._outer_vlan = outer_vlan
+ self._inner_vlan = inner_vlan
+
+ def create_arp_req(self):
+ packet = VppPGInterface.create_arp_req(self)
+ return self.add_dot1_layer(packet)
+
+ def create_ndp_req(self):
+ packet = VppPGInterface.create_ndp_req(self)
+ return self.add_dot1_layer(packet)
+
+ def add_dot1_layer(self, packet):
+ payload = packet.payload
+ packet.remove_payload()
+ packet.add_payload(Dot1Q(vlan=self.outer_vlan) /
+ Dot1Q(vlan=self.inner_vlan) / payload)
+ packet.type = self.DOT1AD_TYPE
+ return packet
+
+ def remove_dot1_layer(self, packet):
+ self.test.instance().assertEqual(type(packet), Ether)
+ self.test.instance().assertEqual(packet.type, self.DOT1AD_TYPE)
+ packet.type = self.DOT1Q_TYPE
+ packet = Ether(str(packet))
+ payload = packet.payload
+ self.test.instance().assertEqual(type(payload), Dot1Q)
+ self.test.instance().assertEqual(payload.vlan, self.outer_vlan)
+ payload = payload.payload
+ self.test.instance().assertEqual(type(payload), Dot1Q)
+ self.test.instance().assertEqual(payload.vlan, self.inner_vlan)
+ payload = payload.payload
+ packet.remove_payload()
+ packet.add_payload(payload)
+ return packet