.PHONY: verify-test-dir

FAILED_DIR=/tmp/vpp-failed-unittests/

verify-test-dir:
ifndef TEST_DIR
	$(error TEST_DIR is not set)
endif

.PHONY: verify-no-running-vpp

ifdef VPP_ZOMBIE_NOCHECK
VPP_PIDS=
else
VPP_PIDS=$(shell pgrep -d, -x vpp_main)
endif

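# interactive debugging (DEBUG=...) and stepping (STEP=...) force the test
# framework to run VPP in the foreground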
ifeq ($(DEBUG),gdb)
FORCE_FOREGROUND=1
else ifeq ($(DEBUG),gdbserver)
FORCE_FOREGROUND=1
else ifeq ($(DEBUG),core)
FORCE_FOREGROUND=1
else ifeq ($(STEP),yes)
FORCE_FOREGROUND=1
else ifeq ($(STEP),y)
FORCE_FOREGROUND=1
else ifeq ($(STEP),1)
FORCE_FOREGROUND=1
else
FORCE_FOREGROUND=0
endif

ifdef PROFILE_OUTPUT
PROFILE_OUTPUT_OPTS=-o $(PROFILE_OUTPUT)
endif

ifndef PROFILE_SORT_BY
PROFILE_SORT_BY=cumtime
endif

ifeq ($(PROFILE),1)
PYTHON_PROFILE_OPTS=-m cProfile $(PROFILE_OUTPUT_OPTS) -s $(PROFILE_SORT_BY)
FORCE_FOREGROUND=1
endif

verify-no-running-vpp:
	@if [ "$(VPP_PIDS)" != "" ]; then \
		echo; \
		echo "*** Existing vpp processes detected (PID(s): $(VPP_PIDS)). Running tests under these conditions is not supported. ***"; \
		echo; \
		ps -fp $(VPP_PIDS);\
		echo; \
		false; \
	fi

UNITTEST_EXTRA_OPTS=
UNITTEST_FAILFAST_OPTS=

ifeq ($(FAILFAST),1)
UNITTEST_FAILFAST_OPTS=-f
UNITTEST_EXTRA_OPTS=$(UNITTEST_FAILFAST_OPTS)
endif

ifneq ($(EXTERN_TESTS),)
UNITTEST_EXTRA_OPTS=$(UNITTEST_FAILFAST_OPTS) -d $(EXTERN_TESTS)
endif

VENV_PATH=$(TEST_DIR)/run/venv

ifeq ($(TEST_DEBUG),1)
TEST_RUN_DIR:=$(VENV_PATH)/run-debug
else
TEST_RUN_DIR:=$(VENV_PATH)/run
endif

ifeq ($(PYTHON),)
PYTHON_INTERP=python2.7
else
PYTHON_INTERP=$(PYTHON)
endif

PYTHON_VERSION=$(shell $(PYTHON_INTERP) -c 'import sys; print(sys.version_info.major)')
PIP_VERSION=19.1.1
# Keep PIP_TOOLS_VERSION in sync with requirements.txt
PIP_TOOLS_VERSION=3.8.0
PYTHON_DEPENDS=requirements-$(PYTHON_VERSION).txt
SCAPY_SOURCE=$(shell find $(VENV_PATH)/lib/$(PYTHON_INTERP) -name site-packages)
BUILD_COV_DIR=$(TEST_DIR)/coverage

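# stamp files recording which of the venv setup steps below have completed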
PIP_TOOLS_INSTALL_DONE=$(TEST_RUN_DIR)/pip-tools-install-$(PYTHON_VERSION).done
PIP_INSTALL_DONE=$(TEST_RUN_DIR)/pip-install-$(PYTHON_VERSION).done
PIP_PATCH_DONE=$(TEST_RUN_DIR)/pip-patch-$(PYTHON_VERSION).done
PAPI_INSTALL_DONE=$(TEST_RUN_DIR)/papi-install-$(PYTHON_VERSION).done

PAPI_INSTALL_FLAGS=$(PIP_INSTALL_DONE) $(PIP_PATCH_DONE) $(PAPI_INSTALL_DONE)

$(PIP_TOOLS_INSTALL_DONE):
	@rm -rf $(VENV_PATH)
	@mkdir -p $(TEST_RUN_DIR)
	@virtualenv $(VENV_PATH) -p $(PYTHON_INTERP)
	# pip version pinning
	@bash -c "source $(VENV_PATH)/bin/activate && \
		  $(PYTHON_INTERP) -m pip install pip===$(PIP_VERSION)"
	@bash -c "source $(VENV_PATH)/bin/activate && \
		  $(PYTHON_INTERP) -m pip install pip-tools===$(PIP_TOOLS_VERSION)"
	@touch $@

$(PYTHON_DEPENDS): $(PIP_TOOLS_INSTALL_DONE) requirements.txt
	@bash -c "source $(VENV_PATH)/bin/activate && \
		  CUSTOM_COMPILE_COMMAND='make test-refresh-deps (or update requirements.txt)' \
		  $(PYTHON_INTERP) -m piptools compile -q --generate-hashes requirements.txt --output-file $@"

$(PIP_INSTALL_DONE): $(PYTHON_DEPENDS)
	@bash -c "source $(VENV_PATH)/bin/activate && \
		  $(PYTHON_INTERP) -m piptools sync $(PYTHON_DEPENDS)"
	@touch $@

$(PIP_PATCH_DONE): $(PIP_INSTALL_DONE)
	@echo --- patching ---
	@sleep 1 # Ensure python recompiles patched *.py files -> *.pyc
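	# patch exits with code 1 if the patch was already applied (--forward);
	# only exit codes greater than 1 abort the loop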
	for f in $(CURDIR)/patches/scapy-2.4/*.patch ; do \
		echo Applying patch: $$(basename $$f) ; \
		patch --forward -p1 -d $(SCAPY_SOURCE) < $$f ; \
		retCode=$$?; \
		[ $$retCode -gt 1 ] && exit $$retCode; \
	done; \
	touch $@

$(PAPI_INSTALL_DONE): $(PIP_PATCH_DONE)
	@bash -c "source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) -m pip install -e $(WS_ROOT)/src/vpp-api/python"
	@touch $@

.PHONY: refresh-deps clean-deps
refresh-deps: clean-deps $(PYTHON_DEPENDS)

clean-deps:
	@rm -f $(PYTHON_DEPENDS)

ifneq ($(EXTERN_PLUGIN_SRC_DIR),)
PLUGIN_SRC_DIR=$(EXTERN_PLUGIN_SRC_DIR)
else
PLUGIN_SRC_DIR=$(WS_ROOT)/src/plugins
endif
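# assemble a "-d <dir>" option for each plugin test directory found under
# $(PLUGIN_SRC_DIR)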
PLUGIN_TEST_DIRS=$(shell find $(PLUGIN_SRC_DIR) -type d -name test -exec echo -n " -d {}" \;)

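# run the tests in a separate session (setsid) so the whole process group can
# be cleaned up; on failure, collect and optionally compress the failed tests'
# logs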
define retest-func
@env FORCE_FOREGROUND=$(FORCE_FOREGROUND) FAILED_DIR=$(FAILED_DIR) VENV_PATH=$(VENV_PATH) scripts/setsid_wrapper.sh $(FORCE_FOREGROUND) $(VENV_PATH)/bin/activate $(PYTHON_INTERP) $(PYTHON_PROFILE_OPTS) run_tests.py -d $(TEST_DIR)$(PLUGIN_TEST_DIRS) $(UNITTEST_EXTRA_OPTS) || env FAILED_DIR=$(FAILED_DIR) COMPRESS_FAILED_TEST_LOGS=$(COMPRESS_FAILED_TEST_LOGS) scripts/compress_failed.sh
endef

.PHONY: sanity

ifeq ($(SANITY),no)
SANITY_IMPORT_VPP_PAPI_CMD=true
SANITY_RUN_VPP_CMD=true
else
SANITY_IMPORT_VPP_PAPI_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) sanity_import_vpp_papi.py
SANITY_RUN_VPP_CMD=source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) sanity_run_vpp.py
endif

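# TEST_JOBS greater than 1 cannot be combined with options that force
# foreground execution (DEBUG, STEP or PROFILE)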
ifndef TEST_JOBS
PARALLEL_ILLEGAL=0
else ifeq ($(FORCE_FOREGROUND),0)
PARALLEL_ILLEGAL=0
else ifeq ($(TEST_JOBS),auto)
PARALLEL_ILLEGAL=0
else ifeq ($(TEST_JOBS),1)
PARALLEL_ILLEGAL=0
else
PARALLEL_ILLEGAL=1
endif

sanity: verify-no-running-vpp
	@sys_req/dev_shm_size.sh
	@bash -c "test $(PARALLEL_ILLEGAL) -eq 0 ||\
	    (echo \"*******************************************************************\" &&\
		 echo \"* Sanity check failed, TEST_JOBS is not 1 or 'auto' and DEBUG, STEP or PROFILE is set\" &&\
	         echo \"*******************************************************************\" &&\
		 false)"
	@bash -c "$(SANITY_IMPORT_VPP_PAPI_CMD) ||\
		(echo \"*******************************************************************\" &&\
		 echo \"* Sanity check failed, cannot import vpp_papi\" &&\
		 echo \"* to debug: \" &&\
		 echo \"* 1. enter test shell:   make test-shell\" &&\
		 echo \"* 2. execute debugger:   gdb python -ex 'run sanity_import_vpp_papi.py'\" &&\
	         echo \"*******************************************************************\" &&\
		 false)"
	@bash -c "$(SANITY_RUN_VPP_CMD) ||\
		(echo \"*******************************************************************\" &&\
		 echo \"* Sanity check failed, cannot run vpp\" &&\
	         echo \"*******************************************************************\" &&\
		 false)"

.PHONY: ext
ext:
	$(MAKE) -C ext

test-dep: verify-test-dir $(PAPI_INSTALL_DONE)

test: verify-test-dir $(PAPI_INSTALL_DONE) ext sanity reset
	$(call retest-func)

retest: verify-test-dir sanity reset
	$(call retest-func)

shell: verify-test-dir $(PAPI_INSTALL_DONE)
	@echo "source $(VENV_PATH)/bin/activate;\
		echo '***';\
		echo VPP_BUILD_DIR=$(VPP_BUILD_DIR);\
		echo VPP_BIN=$(VPP_BIN);\
		echo VPP_PLUGIN_PATH=$(VPP_PLUGIN_PATH);\
		echo VPP_TEST_PLUGIN_PATH=$(VPP_TEST_PLUGIN_PATH);\
		echo VPP_INSTALL_PATH=$(VPP_INSTALL_PATH);\
		echo EXTERN_TESTS=$(EXTERN_TESTS);\
		echo EXTERN_PLUGINS=$(EXTERN_PLUGINS);\
		echo EXTERN_COV_DIR=$(EXTERN_COV_DIR);\
		echo LD_LIBRARY_PATH=$(LD_LIBRARY_PATH);\
		echo '***';\
		exec </dev/tty" | bash -i

.PHONY: wipe doc

reset:
	@rm -f /dev/shm/vpp-unittest-*
	@rm -rf /tmp/vpp-unittest-*
	@rm -rf $(FAILED_DIR)
	@mkdir $(FAILED_DIR)

wipe: reset
	@$(MAKE) -C ext clean
	@rm -rf $(VENV_PATH)
	@rm -f $(PAPI_INSTALL_FLAGS)

doc: verify-test-dir $(PIP_PATCH_DONE)
	@virtualenv $(VENV_PATH) -p python3
	@bash -c "source $(VENV_PATH)/bin/activate && python3 -m pip install sphinx sphinx-rtd-theme"
	@bash -c "source $(VENV_PATH)/bin/activate && make -C doc WS_ROOT=$(WS_ROOT) BR=$(BR) html"

.PHONY: wipe-doc

wipe-doc:
	@$(MAKE) -C doc wipe BR=$(BR)

cov: wipe-cov reset ext verify-test-dir $(PAPI_INSTALL_DONE)
	@lcov --zerocounters --directory $(VPP_BUILD_DIR)
	@test -z "$(EXTERN_COV_DIR)" || lcov --zerocounters --directory $(EXTERN_COV_DIR)
	$(call retest-func)
	@mkdir $(BUILD_COV_DIR)
	@lcov --capture --directory $(VPP_BUILD_DIR) --output-file $(BUILD_COV_DIR)/coverage.info
	@test -z "$(EXTERN_COV_DIR)" || lcov --capture --directory $(EXTERN_COV_DIR) --output-file $(BUILD_COV_DIR)/extern-coverage.info
	@genhtml $(BUILD_COV_DIR)/coverage.info --output-directory $(BUILD_COV_DIR)/html
	@test -z "$(EXTERN_COV_DIR)" || genhtml $(BUILD_COV_DIR)/extern-coverage.info --output-directory $(BUILD_COV_DIR)/extern-html
	@echo
	@echo "Build finished. Code coverage report is in $(BUILD_COV_DIR)/html/index.html"
	@test -z "$(EXTERN_COV_DIR)" || echo "Code coverage report for out-of-tree objects is in $(BUILD_COV_DIR)/extern-html/index.html"

.PHONY: wipe-cov

wipe-cov: wipe
	@rm -rf $(BUILD_COV_DIR)

.PHONY: papi-wipe

papi-wipe:
	@rm -rf $(PAPI_INSTALL_DONE)

.PHONY: checkstyle
checkstyle: verify-test-dir
	@virtualenv $(VENV_PATH) -p python3
	@bash -c "source $(VENV_PATH)/bin/activate && python3 -m pip install pycodestyle"
	@bash -c "source $(VENV_PATH)/bin/activate &&\
		pycodestyle --show-source --ignore=W504,E126,E241,E226,E305,E704,E741,E722 --exclude=$(WS_ROOT)/test/_*.py -v $(WS_ROOT)/test/*.py $(PLUGIN_SRC_DIR)/*/test/*.py ||\
		(echo \"*******************************************************************\" &&\
		 echo \"* Test framework PEP8 compliance check FAILED \" &&\
	         echo \"*******************************************************************\" &&\
		 false)"
	@echo "*******************************************************************"
	@echo "* Test framework PEP8 compliance check passed"
	@echo "*******************************************************************"

help:
	@echo "Running tests:"
	@echo ""
	@echo " test                - build and run (basic) functional tests"
	@echo " test-debug          - build and run (basic) functional tests (debug build)"
	@echo " test-all            - build and run (all) functional tests"
	@echo " test-all-debug      - build and run (all) functional tests (debug build)"
	@echo " retest              - run functional tests"
	@echo " retest-debug        - run functional tests (debug build)"
	@echo " papi-wipe           - rebuild vpp_papi sources"
	@echo " test-wipe           - wipe (temporary) files generated by unit tests"
	@echo " test-shell          - enter shell with test environment"
	@echo " test-shell-debug    - enter shell with test environment (debug build)"
	@echo ""
	@echo "Arguments controlling test runs:"
	@echo " V=[0|1|2]            - set test verbosity level"
	@echo "                        0=ERROR, 1=INFO, 2=DEBUG"
	@echo " TEST_JOBS=[<n>|auto] - use <n> parallel processes for test execution or automatic discovery of maximum acceptable processes (default: 1)"
	@echo " CACHE_OUTPUT=[0|1]   - cache VPP stdout/stderr and log as one block after test finishes (default: 1)"
	@echo " FAILFAST=[0|1]       - fail fast if 1, complete all tests if 0"
	@echo " TIMEOUT=<timeout>    - fail test suite if any single test takes longer than <timeout> (in seconds) to finish (default: 600)"
	@echo " RETRIES=<n>          - retry failed tests <n> times"
	@echo " DEBUG=<type>         - set VPP debugging kind"
	@echo "    DEBUG=core        - detect coredump and load it in gdb on crash"
	@echo "    DEBUG=gdb         - allow easy debugging by printing VPP PID"
	@echo "                        and waiting for user input before running"
	@echo "                        and tearing down a testcase"
	@echo "    DEBUG=gdbserver   - run gdb inside a gdb server, otherwise"
	@echo "                        same as above"
	@echo " STEP=[yes|no]        - ease debugging by stepping through a testcase"
	@echo " SANITY=[yes|no]      - perform sanity import of vpp-api/sanity vpp run before running tests (default: yes)"
	@echo " EXTENDED_TESTS=[1|y] - run extended tests"
	@echo " TEST=<filter>        - filter the set of tests:"
	@echo "    by file-name      - only run tests from specified file, e.g. TEST=test_bfd selects all tests from test_bfd.py"
	@echo "    by file-suffix    - same as file-name, but 'test_' is omitted e.g. TEST=bfd selects all tests from test_bfd.py"
	@echo "    by wildcard       - wildcard filter is <file>.<class>.<test function>, each can be replaced by '*'"
	@echo "                        e.g. TEST='test_bfd.*.*' is equivalent to above example of filter by file-name"
	@echo "                             TEST='bfd.*.*' is equivalent to above example of filter by file-suffix"
	@echo "                             TEST='bfd.BFDAPITestCase.*' selects all tests from test_bfd.py which are part of BFDAPITestCase class"
	@echo "                             TEST='bfd.BFDAPITestCase.test_add_bfd' selects a single test named test_add_bfd from test_bfd.py/BFDAPITestCase"
	@echo "                             TEST='*.*.test_add_bfd' selects all test functions named test_add_bfd from all files/classes"
	@echo ""
	@echo " VPP_ZOMBIE_NOCHECK=1 - skip checking for vpp (zombie) processes (CAUTION)"
	@echo " COREDUMP_SIZE=<size> - pass <size> as unix { coredump-size <size> } argument to vpp"
	@echo "                        e.g. COREDUMP_SIZE=4g"
	@echo "                             COREDUMP_SIZE=unlimited"
	@echo " COREDUMP_COMPRESS=1  - compress core files if not debugging them"
	@echo " EXTERN_TESTS=<path>  - path to out-of-tree test_<name>.py files containing test cases"
	@echo " EXTERN_PLUGINS=<path>- path to out-of-tree plugins to be loaded by vpp under test"
	@echo " EXTERN_COV_DIR=<path>- path to out-of-tree prefix, where source, object and .gcda files can be found for coverage report"
	@echo ""
	@echo " PROFILE=1            - enable profiling of test framework via cProfile module"
	@echo " PROFILE_SORT_BY=opt  - sort profiling report by opt - consult cProfile documentation for possible values (default: cumtime)"
	@echo " PROFILE_OUTPUT=file  - output profiling info to file - use absolute path (default: stdout)"
	@echo ""
	@echo " TEST_DEBUG=1         - turn on debugging of the test framework itself (expert)"
	@echo ""
	@echo " SKIP_AARCH64=1       - skip tests that are failing on the ARM platorm in FD.io CI"
	@echo ""
	@echo " SOCKET=1             - Communicate with VPP over Unix domain socket instead of SHM"
	@echo ""
	@echo "Creating test documentation"
	@echo " test-doc            - generate documentation for test framework"
	@echo " test-wipe-doc       - wipe documentation for test framework"
	@echo ""
	@echo "Creating test code coverage report"
	@echo " test-cov            - generate code coverage report for test framework"
	@echo " test-wipe-cov       - wipe code coverage report for test framework"
	@echo ""
	@echo "Verifying code-style"
	@echo " test-checkstyle     - check PEP8 compliance"
	@echo ""
 * ...s eBGP PIC-edge scenario are calculated by BGP. Each peer is configured
 * to always advertise its best external path to its iBGP peers. Backup paths
 * therefore send traffic from the PE back into the core to an alternate PE.
 * A PE may have multiple external paths, i.e. multiple directly connected
 * CEs; it may also have multiple backup PEs. However, there is no correlation
 * between the two, so unlike LFA-FRR the redundancy model is N-M: N primary
 * paths are backed up by M backup paths; only when all primary paths fail is
 * the cutover performed onto the M backup paths. Note that PE2 must be
 * suitably configured to forward traffic on its external path that was
 * received from PE1. VPP FIB does not support external-internal-BGP (eiBGP)
 * load-balancing.
 *
 * As with LFA-FRR, the use of primary and backup paths is not currently
 * supported; however, the use of a recursive-multi-path-adj, and a suitably
 * constrained hashing algorithm to choose from the primary or backup path
 * sets, would again provide the necessary shared object and hence the
 * prefix-scale-independent cutover.
 *
 * Astute readers will recognise that both of the eBGP PIC scenarios refer
 * only to a BGP-free core.
 *
 * Fast convergence implementation options come in two flavours:
 *  1) Insert switches into the data-path. The switch represents the
 *     protected resource. If the switch is 'on' the primary path is taken,
 *     otherwise the backup path is taken. Testing the switch in the
 *     data-path comes with an associated performance cost. A given packet
 *     may encounter more than one protected resource as it is forwarded.
 *     This approach minimises cutover times, as packets will be forwarded on
 *     the backup path as soon as the protected resource is detected to be
 *     down and the single switch is tripped. However, it comes at a
 *     performance cost, which increases with each shared resource a packet
 *     encounters in the data-path. This approach is thus best suited to
 *     LFA-FRR, where the protected routes are non-recursive (i.e. they
 *     encounter few shared resources) and the expectation on cutover times
 *     is more stringent (<50msecs).
 *  2) Update shared objects. Identify objects in the data-path that are
 *     required to be present whether or not fast convergence is required
 *     (i.e. adjacencies) and that can be shared by multiple routes. Create a
 *     dependency between these objects at the protected resource. When the
 *     protected resource fails, each of the shared objects is updated in a
 *     way that all users of it see a consistent change. This approach incurs
 *     no performance penalty, as the data-path structure is unchanged;
 *     however, the cutover times are longer, as more work is required when
 *     the resource fails. This scheme is thus more appropriate to recursive
 *     prefixes (where the packet will encounter multiple protected
 *     resources) and to fast-convergence technologies where the cutover
 *     times are less stringent (i.e. PIC).
 *
 * Implementation:
 * ---------------
 *
 * Due to the requirements outlined above, not all routes known to FIB
 * (e.g. adj-fibs) are installed in forwarding. However, should circumstances
 * change, those routes will need to be added. This adds the requirement that
 * a FIB maintains two tables per-VRF, per-AF (where a 'table' is indexed by
 * prefix): the forwarding and non-forwarding tables.
 *
 * For DP speed in VPP we want the lookup in the forwarding table to directly
 * result in the ADJ.
 * So, of the two tables, one contains all the routes (a lookup therein
 * yields a fib_entry_t) and the other contains only the forwarding routes
 * (a lookup therein yields an ip_adjacency_t). The latter is used by the DP.
 * This trades memory for forwarding performance - a good trade-off in VPP's
 * expected operating environments.
 *
 * Note these tables are keyed only by the prefix (and since there are two
 * per-VRF, implicitly by the VRF too). The key for an adjacency is the
 * tuple: {next-hop, address (and its AF), interface, link/ether-type}.
 * Consider this curious, but allowed, config:
 *
 *    set int ip addr 10.0.0.1/24 Gig0
 *    set ip arp Gig0 10.0.0.2 dead.dead.dead
 *    # a host in that sub-net is routed via a better next hop (say it
 *    # avoids a big L2 domain)
 *    ip route add 10.0.0.2 Gig1 192.168.1.1
 *    # this recursive should go via Gig1
 *    ip route add 1.1.1.1/32 via 10.0.0.2
 *    # this non-recursive should go via Gig0
 *    ip route add 2.2.2.2/32 via Gig0 10.0.0.2
 *
 * For the last route, the lookup for the path (via {Gig0, 10.0.0.2}) in the
 * prefix table would not yield the correct result. To fix this we need a
 * separate table for the adjacencies.
 *
 * - FIB data structures:
 *
 * fib_entry_t:
 *   - a representation of a route.
 *   - has a prefix.
 *   - maintains an array of path-lists that have been contributed by the
 *     different sources.
 *   - installs, in the forwarding table, an adjacency contributed by the
 *     best source's path-list.
 *
 * fib_path_list_t:
 *   - a list of paths.
 *   - path-lists may be shared between FIB entries. The path-lists are thus
 *     kept in a DB whose key is the combined description of the paths. We
 *     share path-lists when it will aid convergence to do so. Adding
 *     path-lists to this DB that are never shared, or are shared only by
 *     prefixes that are not subject to PIC, will increase the size of the
 *     DB unnecessarily and may lead to increased search times due to hash
 *     collisions.
 *   - the path-list contributes the appropriate adj for the entry in the
 *     forwarding table. The adj can be 'normal', multi-path or recursive,
 *     depending on the number of paths and their types.
 *   - since path-lists are shared, there is only one instance of the
 *     multi-path adj that they [may] create; as such, multi-path adjacencies
 *     do not need a separate DB.
 *   The path-list with recursive paths and the recursive adjacency that it
 *   contributes form the backbone of the fast convergence architecture (as
 *   described previously).
 *
 * fib_path_t:
 *   - a description of how to forward the traffic (i.e. via {Gig1, K}).
 *   - the path describes the intent on how to forward. This differs from
 *     how the path resolves; i.e. it might not be resolved at all (since
 *     the interface is deleted or down).
 *   - paths have different types, most notably recursive or non-recursive.
 *   - a fib_path_t will contribute the appropriate adjacency object. It is
 *     from these contributions that the DP graph/chain for the route is
 *     built.
 *   - if the path is recursive and a recursion loop is detected, then the
 *     path will contribute the special DROP adjacency. This way, whilst the
 *     control plane graph is looped, the data-plane graph is not.
 *
 * We build a graph of these objects:
 *
 *    fib_entry_t -> fib_path_list_t -> fib_path_t -> ...
 *
 * for recursive paths:
 *
 *    fib_path_t -> fib_entry_t -> ...
 *
 * and for non-recursive paths:
 *
 *    fib_path_t -> ip_adjacency_t -> interface
 *
 * These objects, which constitute the 'control plane' part of the FIB, are
 * used to represent the resolution of a route. As a whole this is referred
 * to as the control plane graph.
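 *
 * To make the shape of the control plane graph concrete, a minimal
 * illustrative sketch in C follows. The type names fib_entry_t,
 * fib_path_list_t and fib_path_t are the real ones described above, but the
 * fields shown are hypothetical simplifications; the actual definitions
 * carry many more fields and sources:
 *
 *    typedef u32 fib_node_index_t;      // index into a typed object pool
 *
 *    typedef struct fib_path_t_ {
 *        // non-recursive: forward out of an interface to a next-hop
 *        u32              fp_sw_if_index;
 *        ip46_address_t   fp_next_hop;
 *        // recursive: resolve via another fib_entry_t
 *        fib_node_index_t fp_via_fib_entry;
 *    } fib_path_t;
 *
 *    typedef struct fib_path_list_t_ {
 *        // the paths in this (possibly shared) list
 *        fib_node_index_t *fpl_paths;
 *    } fib_path_list_t;
 *
 *    typedef struct fib_entry_t_ {
 *        fib_prefix_t      fe_prefix;
 *        // one path-list per contributing source (API, interface, ...)
 *        fib_node_index_t *fe_path_lists;
 *    } fib_entry_t;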
 * There is a separate DP graph to represent the forwarding of a packet. In
 * the DP graph each object represents an action that is applied to a packet
 * as it traverses the graph. For example, a lookup of an IP address in the
 * forwarding table could result in the following graph:
 *
 *    recursive-adj --> multi-path-adj --> interface_A
 *                                     --> interface_B
 *
 * A packet traversing this FIB DP graph would thus also traverse a VPP node
 * graph of:
 *
 *    ipX_recursive --> ipX_rewrite --> interface_A_tx --> etc
 *
 * The taxonomy of objects in a FIB graph is as follows; consider:
 *
 *    A -->
 *    B --> D
 *    C -->
 *
 * where A, B and C are (for example) routes that resolve through D.
 *    parent:   D is the parent of A, B and C.
 *    children: A, B and C are children of D.
 *    sibling:  A, B and C are siblings of one another.
 *
 * All shared objects in the FIB are reference counted. Users of these
 * objects are thus expected to use the add_lock/unlock semantics (as one
 * would normally use malloc/free).
 *
 * WALKS
 *
 * It is necessary to walk/traverse the graph forwards (entry to interface)
 * to perform a collapse or build a recursive adj, and backwards (interface
 * to entry) to perform updates, i.e. when interface state changes or when
 * recursive route resolution updates occur.
 * A forward walk follows simply by navigating an object's parent pointer to
 * access its parent object. For objects with multiple parents (e.g. a
 * path-list), each parent is walked in turn.
 * To support back-walks, direct dependencies are maintained between
 * objects; i.e. in the relationship {A, B, C} --> D, object D will maintain
 * a list of 'pointers' to its children {A, B, C}. Bare C-language pointers
 * are not allowed, so a pointer is described in terms of an object type
 * (i.e. entry, path-list, etc.) and index - this allows the object to be
 * retrieved from the appropriate pool. The list is maintained to achieve
 * fast convergence at scale: when there are millions of recursive prefixes,
 * it is very inefficient to blindly walk the tables looking for entries
 * that were affected by a given topology change. The lowest-hanging fruit
 * when optimising is to remove actions that are not required, so all
 * back-walks only traverse objects that are directly affected by the
 * change.
 *
 * PIC Core and fast-reroute rely on FIB reacting quickly to an interface
 * state change to update the multi-path-adjacencies that use this
 * interface. An example graph is shown below:
 *
 *    E_a -->
 *    E_b --> PL_2 --> P_a --> Interface_A
 *    ...          --> P_c -\
 *    E_k -->                \
 *                            Interface_K
 *                           /
 *    E_l -->               /
 *    E_m --> PL_1 --> P_d -/
 *    ...          --> P_f --> Interface_F
 *    E_z -->
 *
 *    E  = fib_entry_t
 *    PL = fib_path_list_t
 *    P  = fib_path_t
 *
 * The subscripts are arbitrary and serve only to distinguish object
 * instances. This CP graph results in the following DP graph:
 *
 *    M-ADJ-2 --> Interface_A
 *             \
 *              -> Interface_K
 *             /
 *    M-ADJ-1 --> Interface_F
 *
 *    M-ADJ = multi-path-adjacency
 *
 * When interface K goes down, a back-walk is started over its dependants in
 * the control plane graph. This back-walk will reach PL_1 and PL_2 and
 * result in the calculation of new adjacencies that have interface K
 * removed. The walk will continue to the entry objects and thus the
 * forwarding table is updated for each prefix with the new adjacency. The
 * DP graph then becomes:
 *
 *    ADJ-3 --> Interface_A
 *
 *    ADJ-4 --> Interface_F
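 *
 * A sketch of the typed child 'pointer' that such a back-walk follows
 * (illustrative only; the real scheme in the fib/ sources also carries walk
 * context and per-type virtual function tables):
 *
 *    typedef enum fib_node_type_t_ {
 *        FIB_NODE_TYPE_ENTRY,
 *        FIB_NODE_TYPE_PATH_LIST,
 *        FIB_NODE_TYPE_PATH,
 *        // ... one value per object type in the graph
 *    } fib_node_type_t;
 *
 *    typedef struct fib_node_ptr_t_ {
 *        fib_node_type_t  fnp_type;   // which pool the object lives in
 *        fib_node_index_t fnp_index;  // the object's index in that pool
 *    } fib_node_ptr_t;
 *
 *    // each object keeps a list of fib_node_ptr_t for its children, so a
 *    // back-walk visits only the objects directly affected by a change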
 * The eBGP PIC scenarios described above relied on the update of a
 * path-list's recursive-adjacency to provide the shared point of cutover.
 * This is shown below:
 *
 *    E_a -->
 *    E_b --> PL_2 --> P_a --> E_44 --> PL_a --> P_b --> Interface_A
 *    ...          --> P_c -\
 *    E_k -->                \
 *                            \
 *                             E_1 --> PL_k --> P_k --> Interface_K
 *                            /
 *    E_l -->                /
 *    E_m --> PL_1 --> P_d -/
 *    ...          --> P_f --> E_55 --> PL_e --> P_e --> Interface_E
 *    E_z -->
 *
 * The failure scenario is the removal of entry E_1, whereby the paths P_c
 * and P_d become unresolved. To achieve PIC, the two shared recursive
 * path-lists, PL_1 and PL_2, must be updated to remove E_1 from the
 * recursive-multi-path-adjacencies that they contribute before any entry
 * E_a to E_z is updated. This means that as the update propagates backwards
 * (right to left) in the graph it must do so breadth-first, not
 * depth-first. Note this approach leads to convergence times that are
 * dependent on the number of path-lists, and so on the number of
 * combinations of egress PEs - this is desirable, as this scale is
 * considerably lower than the number of prefixes.
 *
 * Consider another section of the graph, similar to the one shown above,
 * where another prefix E_2 is in a similar position to E_1 and so also has
 * many dependent children. It is reasonable to expect that a particular
 * network failure may simultaneously render E_1 and E_2 unreachable. This
 * means that the update to withdraw E_2 is downloaded immediately after the
 * update to withdraw E_1. It is a requirement on the FIB to not spend large
 * amounts of time in a back-walk whilst processing the update for E_1;
 * i.e. the back-walk must not reach as far as E_a and its siblings.
 * Therefore, after the back-walk has traversed one generation
 * (breadth-first) to update all the path-lists, it should be
 * suspended/backgrounded and further updates allowed to be handled. Once
 * the update queue is empty, the suspended walks can be resumed. Note that
 * in the case that multiple updates affect the same entry (say E_1),
 * multiple similar walks are triggered; these are merged, so each child is
 * updated only once.
 *
 * In the presence of more layers of recursion PIC is still a desirable
 * feature. Consider an extension to the diagram above, where more recursive
 * routes (E_100 -> E_199) are added as children of E_a:
 *
 *    E_100 -->
 *    E_101 --> PL_3 --> P_j -\
 *    ...                      \
 *    E_199 -->                 E_a -->
 *                              E_b --> PL_2 --> P_a --> E_44 --> ...etc..
 *                              ...          --> P_c -\
 *                              E_k                    \
 *                                                      E_1 --> ...etc..
 *                                                     /
 *                              E_l -->               /
 *                              E_m --> PL_1 --> P_d -/
 *                              ...          --> P_e --> E_55 --> ...etc..
 *                              E_z -->
 *
 * To achieve PIC for the routes E_100 -> E_199, PL_3 needs to be updated
 * before E_b -> E_z; a breadth-first traversal at each level would not
 * achieve this. Instead the walk must proceed intelligently. Children of
 * PL_2 are sorted so that those entry objects that themselves have children
 * appear first in the list, those without later. When an entry object that
 * has children is walked, a walk of its children is pushed to the front of
 * the background queue. The background queue is a priority queue. As the
 * breadth-first traversal proceeds across the dependent entry objects E_a
 * to E_k, when the first entry that does not have children is reached
 * (E_b), the walk is suspended and placed at the back of the queue.
 * Following this prioritisation method, shared path-list updates are
 * performed before all non-resolving entry objects.
 * The CPU/core/thread that handles the updates is the same thread that
 * handles the back-walks. Handling updates has a higher priority than
 * making walk progress, so a walk is required to be
 * interruptible/suspendable when new updates are available.
 *
 * !!! TODO - this section describes how walks should be, not how they are !!!
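 *
 * As the TODO above notes, this is the intended scheme; a pseudo-code
 * sketch of the prioritised walk scheduling just described (the names here
 * are illustrative, not the actual functions):
 *
 *    // children are pre-sorted: entries that have children come first
 *    foreach (child in walk->children_remaining)
 *    {
 *        if (fib_entry_has_children (child))
 *        {
 *            // its dependants must converge first: schedule a walk of the
 *            // child's children at the front of the background queue
 *            push_front (background_queue, new_child_walk (child));
 *            update (child);
 *        }
 *        else
 *        {
 *            // first non-resolving entry: suspend this walk at the back
 *            // of the queue so pending updates can be handled
 *            push_back (background_queue, walk);
 *            break;
 *        }
 *    }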
 * In the diagram above, E_100 is an IP route; however, VPP has no
 * restrictions on the type of object that can be a dependent of a FIB
 * entry. Children of a FIB entry can be (and are) GRE & VXLAN tunnel
 * endpoints, L2VPN LSPs, etc. By including all object types in the graph
 * and extending the back-walk, we can thus deliver fast convergence to
 * technologies that overlay on an IP network.
 *
 * If, having read all the above carefully, you are still thinking 'i don't
 * need all this %&$*, i have a route only I know about and I just need to
 * jam it in', then fib_table_entry_special_add() is your only friend.
 */

#ifndef __FIB_H__
#define __FIB_H__

#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry.h>

#endif