From d9a08e59581aafe44d446b5bc1ce6ff86b2b173b Mon Sep 17 00:00:00 2001 From: Peter Mikus Date: Thu, 13 Jul 2017 14:42:38 +0200 Subject: CSIT-618 CSIT rls1707 Report - Update I Change-Id: I7c3af55db6cc89f03004db6ddf6fcf67965132a7 Signed-off-by: Peter Mikus --- .../dpdk_performance_tests/csit_release_notes.rst | 4 +- .../dpdk_performance_tests/test_environment.rst | 284 +++++++++++-- .../csit_release_notes.rst | 12 +- .../report/honeycomb_functional_tests/overview.rst | 10 +- docs/report/index.rst | 1 + docs/report/introduction/csit_design.png | Bin 0 -> 106902 bytes docs/report/introduction/csit_design.rst | 178 +++++++++ docs/report/introduction/csit_test_naming.rst | 120 +++--- docs/report/introduction/general_notes.rst | 6 +- docs/report/introduction/index.rst | 1 + docs/report/introduction/overview.rst | 7 + .../csit_release_notes.rst | 18 + .../nsh_sfc_functional_tests/documentation.rst | 6 + docs/report/nsh_sfc_functional_tests/index.rst | 10 + docs/report/nsh_sfc_functional_tests/overview.rst | 105 +++++ .../nsh_sfc_functional_tests/test_environment.rst | 440 +++++++++++++++++++++ .../vpp_functional_tests/csit_release_notes.rst | 14 +- docs/report/vpp_functional_tests/overview.rst | 14 +- .../vpp_functional_tests/test_environment.rst | 63 +-- .../vpp_performance_tests/csit_release_notes.rst | 268 ++++++------- docs/report/vpp_performance_tests/overview.rst | 91 ++--- .../packet_latency_graphs/index.rst | 8 +- .../packet_latency_graphs/ip4.rst | 60 +++ .../packet_latency_graphs/ip4_tunnels.rst | 51 +++ .../packet_latency_graphs/ip6.rst | 57 +++ .../packet_latency_graphs/ip6_tunnels.rst | 47 +++ .../packet_latency_graphs/ipsec.rst | 40 +- .../packet_latency_graphs/ipv4.rst | 60 --- .../packet_latency_graphs/ipv4_tunnels.rst | 51 --- .../packet_latency_graphs/ipv6.rst | 58 --- .../packet_latency_graphs/ipv6_tunnels.rst | 47 --- .../packet_latency_graphs/l2.rst | 82 ++-- .../packet_latency_graphs/vm_vhost.rst | 30 +- .../packet_throughput_graphs/index.rst | 8 +- .../packet_throughput_graphs/ip4.rst | 123 ++++++ .../packet_throughput_graphs/ip4_tunnels.rst | 105 +++++ .../packet_throughput_graphs/ip6.rst | 116 ++++++ .../packet_throughput_graphs/ip6_tunnels.rst | 98 +++++ .../packet_throughput_graphs/ipsec.rst | 67 ++-- .../packet_throughput_graphs/ipv4.rst | 126 ------ .../packet_throughput_graphs/ipv4_tunnels.rst | 105 ----- .../packet_throughput_graphs/ipv6.rst | 117 ------ .../packet_throughput_graphs/ipv6_tunnels.rst | 98 ----- .../packet_throughput_graphs/l2.rst | 83 +++- .../packet_throughput_graphs/vm_vhost.rst | 85 ++-- .../vpp_performance_tests/test_environment.rst | 278 ++++++++++++- resources/tools/report_gen/conf.py | 11 +- resources/tools/report_gen/run_report.sh | 107 ++--- 48 files changed, 2582 insertions(+), 1188 deletions(-) create mode 100644 docs/report/introduction/csit_design.png create mode 100644 docs/report/introduction/csit_design.rst create mode 100644 docs/report/nsh_sfc_functional_tests/csit_release_notes.rst create mode 100644 docs/report/nsh_sfc_functional_tests/documentation.rst create mode 100644 docs/report/nsh_sfc_functional_tests/index.rst create mode 100644 docs/report/nsh_sfc_functional_tests/overview.rst create mode 100644 docs/report/nsh_sfc_functional_tests/test_environment.rst create mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ip4.rst create mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ip4_tunnels.rst create mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ip6.rst create mode 
100644 docs/report/vpp_performance_tests/packet_latency_graphs/ip6_tunnels.rst delete mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst delete mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst delete mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst delete mode 100644 docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst create mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ip4.rst create mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ip4_tunnels.rst create mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ip6.rst create mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ip6_tunnels.rst delete mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst delete mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst delete mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst delete mode 100644 docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst diff --git a/docs/report/dpdk_performance_tests/csit_release_notes.rst b/docs/report/dpdk_performance_tests/csit_release_notes.rst index e8f47d9986..9673754d92 100644 --- a/docs/report/dpdk_performance_tests/csit_release_notes.rst +++ b/docs/report/dpdk_performance_tests/csit_release_notes.rst @@ -4,9 +4,9 @@ CSIT Release Notes Changes in CSIT |release| ------------------------- -#. Improved Testpmd tests +#. Improved performance of testpmd tests - - Performance of NICs - 2p40GE Intel xl710, 2p10GE Intel x710 + - Performance of NICs - 2p40GE Intel xl710, 2p10GE Intel x710 #. Added L3FWD tests on 2p10GE Intel x520-DA2 diff --git a/docs/report/dpdk_performance_tests/test_environment.rst b/docs/report/dpdk_performance_tests/test_environment.rst index ed71c7fe46..e8ed76d761 100644 --- a/docs/report/dpdk_performance_tests/test_environment.rst +++ b/docs/report/dpdk_performance_tests/test_environment.rst @@ -4,17 +4,144 @@ Test Environment To execute performance tests, there are three identical testbeds, each testbed consists of two SUTs and one TG. +Naming Convention +----------------- + +Following naming convention is used within this page to specify physical +connectivity and wiring across defined CSIT testbeds: + +- testbedname: testbedN. +- hostname: + + - traffic-generator: tN-tgW. + - system-under-testX: tN-sutX. + +- portnames: + + - tN-tgW-cY/pZ. + - tN-sutX-cY/pZ. + +- where: + + - N - testbed number. + - tgW - server acts as traffic-generator with W index. + - sutX - server acts as system-under-test with X index. + - Y - PCIe slot number denoting a NIC card number within the host. + + - Y=1,2,3 - slots in Riser 1, Right PCIe Riser Board, NUMA node 0. + - Y=4,5,6 - slots in Riser 2, Left PCIe Riser Board, NUMA node 1. + - Y=m - the MLOM slot. + + - Z - port number on the NIC card. + +Server HW Configuration +----------------------- + +CSIT testbed contains following three HW configuration types of UCS x86 servers, +across total of ten servers provided: + +#. Type-1: Purpose - VPP functional and performance conformance testing. + + - Quantity: 6 computers as SUT hosts (Systems Under Test). + - Physical connectivity: + + - CIMC and host management ports. + - NIC ports connected in 3-node topologies. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. + - RAM Memory: 16* 32GB DDR4-2133MHz. 
+ - Disks: 2* 2TB 12G SAS 7.2K RPM SFF HDD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 PCIe3.0 lanes) + + - PCIe Slot1: Cisco VIC 1385 2p40GE. + + - PCIe Slot2: Intel NIC x520 2p10GE. + - PCIe Slot3: empty. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 PCIe3.0 lanes) + + - PCIe Slot4: Intel NIC xl710 2p40GE. + - PCIe Slot5: Intel NIC x710 2p10GE. + - PCIe Slot6: Intel QAT 8950 50G (Walnut Hill) + + - MLOM slot: Cisco VIC 1227 2p10GE (x8 PCIe2.0 lanes). + +#. Type-2: Purpose - VPP functional and performance conformance testing. + + - Quantity: 3 computers as TG hosts (Traffic Generators). + - Physical connectivity: + + - CIMC and host management ports. + - NIC ports connected in 3-node topologies. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. + - RAM Memory: 16* 32GB DDR4-2133MHz. + - Disks: 2* 2TB 12G SAS 7.2K RPM SFF HDD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 lanes) + + - PCIe Slot1: Intel NIC xl710 2p40GE. + - PCIe Slot2: Intel NIC x710 2p10GE. + - PCIe Slot3: Intel NIC x710 2p10GE. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 lanes) + + - PCIe Slot4: Intel NIC xl710 2p40GE. + - PCIe Slot5: Intel NIC x710 2p10GE. + - PCIe Slot6: Intel NIC x710 2p10GE. + + - MLOM slot: empty. + +#. Type-3: Purpose - VIRL functional conformance. + + - Quantity: 3 computers as VIRL hosts. + - Physical connectivity: + + - CIMC and host management ports. + - no NIC ports, standalone setup. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. + - RAM Memory: 16* 32GB DDR4-2133MHz. + - Disks: 2* 480 GB 2.5inch 6G SATA SSD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 lanes) + + - no cards. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 lanes) + + - no cards. + + - MLOM slot: empty. + SUT Configuration - Host HW --------------------------- Host hardware details (CPU, memory, NIC layout) and physical topology are -described in detail in -`LF FDio CSIT testbed wiki page `_. +described in detail in `LF FDio CSIT testbed wiki page +`_. 
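+
+As an illustration, the port naming convention defined above maps onto the NIC
+placement just listed as follows; the testbed and host indices here are made up
+for the example only:
+
+::
+
+    t1-sut1-c4/p1   testbed1, SUT host 1, NIC in PCIe Slot4
+                    (Left PCIe Riser Board, NUMA node 1, Intel xl710 2p40GE), port 1.
+    t1-tg1-c1/p2    testbed1, TG host 1, NIC in PCIe Slot1
+                    (Right PCIe Riser Board, NUMA node 0, Intel xl710 2p40GE), port 2.
+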
**Host configuration** -- All hosts are Cisco UCS C240-M4 (2x Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz, - 18c, 512GB RAM) - :: +All hosts are Cisco UCS C240-M4 (2x Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz, +18c, 512GB RAM) + +:: $ lscpu Architecture: x86_64 @@ -42,8 +169,9 @@ described in detail in NUMA node1 CPU(s): 18-35 Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm xsaveopt cqm_llc cqm_occup_llc dtherm arat pln pts -- BIOS settings - :: +**BIOS settings** + +:: C240 /bios # show advanced detail Set-up parameters: @@ -114,18 +242,19 @@ described in detail in CDN Support for VIC: Disabled Out-of-Band Management: Disabled -- In addition to CIMC and Management, each TG has 4x Intel X710 10GB NIC - (=8 ports) and 2x Intel XL710 40GB NIC (=4 ports), whereas each SUT has: +**NIC models and placement** - - 1x Intel X520 NIC (10GB, 2 ports), - - 1x Cisco VIC 1385 (40GB, 2 ports), - - 1x Intel XL710 NIC (40GB, 2 ports), - - 1x Intel X710 NIC (10GB, 2 ports), - - 1x Cisco VIC 1227 (10GB, 2 ports). - - This allows for a total of five ring topologies, each using ports on - specific NIC model, enabling per NIC model benchmarking. +In addition to CIMC and Management, each TG has 4x Intel X710 10GB NIC +(=8 ports) and 2x Intel XL710 40GB NIC (=4 ports), whereas each SUT has: -**NIC models and placement** +- 1x Intel X520 NIC (10GB, 2 ports), +- 1x Cisco VIC 1385 (40GB, 2 ports), +- 1x Intel XL710 NIC (40GB, 2 ports), +- 1x Intel X710 NIC (10GB, 2 ports), +- 1x Cisco VIC 1227 (10GB, 2 ports). + +This allows for a total of five ring topologies, each using ports on specific +NIC model, enabling per NIC model benchmarking. - 0a:00.0 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01) Subsystem: Intel Corporation Ethernet Server @@ -157,10 +286,13 @@ described in detail in SUT Configuration - Host OS Linux --------------------------------- -Software details (OS, configuration) are described in -`CSIT/CSIT_LF_testbed `_. +Software details (OS, configuration) are described in FD.io wiki `LF FDio CSIT +testbed wiki page `_. -Below a subset of the configuration: +System provisioning is done by combination of PXE boot unattented install and +`Ansible `_ described in `CSIT Testbed Setup`_. + +Below a subset of the running configuration: :: @@ -233,11 +365,39 @@ Below a subset of the configuration: Node 1 HugePages_Free: 2048 Node 1 HugePages_Surp: 0 +**Kernel boot parameters used in CSIT performance testbeds** + +- **isolcpus=-** used for all cpu cores apart from + first core of each socket used for running VPP worker threads and Qemu/LXC + processes https://www.kernel.org/doc/Documentation/kernel-parameters.txt +- **intel_pstate=disable** - [X86] Do not enable intel_pstate as the default + scaling driver for the supported processors. Intel P-State driver decide what + P-state (CPU core power state) to use based on requesting policy from the + cpufreq core. 
[X86 - Either 32-bit or 64-bit x86] + https://www.kernel.org/doc/Documentation/cpu-freq/intel-pstate.txt +- **nohz_full=-** - [KNL,BOOT] In kernels built with + CONFIG_NO_HZ_FULL=y, set the specified list of CPUs whose tick will be stopped + whenever possible. The boot CPU will be forced outside the range to maintain + the timekeeping. The CPUs in this range must also be included in the + rcu_nocbs= set. Specifies the adaptive-ticks CPU cores, causing kernel to + avoid sending scheduling-clock interrupts to listed cores as long as they have + a single runnable task. [KNL - Is a kernel start-up parameter, SMP - The + kernel is an SMP kernel]. + https://www.kernel.org/doc/Documentation/timers/NO_HZ.txt +- **rcu_nocbs** - [KNL] In kernels built with CONFIG_RCU_NOCB_CPU=y, set the + specified list of CPUs to be no-callback CPUs, that never queue RCU callbacks + (read-copy update). + https://www.kernel.org/doc/Documentation/kernel-parameters.txt + +**Applied command line boot parameters:** + :: $ cat /proc/cmdline BOOT_IMAGE=/vmlinuz-4.4.0-72-generic root=UUID=35ea11e4-e44f-4f67-8cbe-12f09c49ed90 ro isolcpus=1-17,19-35 nohz_full=1-17,19-35 rcu_nocbs=1-17,19-35 intel_pstate=disable console=tty0 console=ttyS0,115200n8 +**Mount listing** + :: $ cat /proc/mounts @@ -273,6 +433,8 @@ Below a subset of the configuration: none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0 lxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0 +**Package listing** + :: $ dpkg -l @@ -808,6 +970,8 @@ Below a subset of the configuration: ii zlib1g:amd64 1:1.2.8.dfsg-2ubuntu4 amd64 compression library - runtime ii zlib1g-dev:amd64 1:1.2.8.dfsg-2ubuntu4 amd64 compression library - development +**Kernel module listing** + :: $ lsmod | sort @@ -897,6 +1061,8 @@ Below a subset of the configuration: xt_CHECKSUM 16384 1 xt_tcpudp 16384 5 +**Sysctl listing** + :: $ sysctl -a @@ -1841,6 +2007,8 @@ Below a subset of the configuration: vm.vfs_cache_pressure = 100 vm.zone_reclaim_mode = 0 +**Services listing** + :: $ service --status-all @@ -1888,6 +2056,71 @@ Below a subset of the configuration: [ + ] uuidd [ - ] x11-common +**Host CFS optimizations (QEMU+VPP)** + +Applying CFS scheduler tuning on all Qemu vcpu worker threads (those are +handling testpmd - pmd threads) and VPP PMD worker threads. List of VPP PMD +threads can be obtained e.g. from: + +:: + + $ for psid in $(pgrep vpp) + $ do + $ for tid in $(ps -Lo tid --pid $psid | grep -v TID) + $ do + $ echo $tid + $ done + $ done + +Or: + +:: + + $ cat /proc/`pidof vpp`/task/*/stat | awk '{print $1" "$2" "$39}' + +Applying Round-robin scheduling with highest priority + +:: + + $ for psid in $(pgrep vpp) + $ do + $ for tid in $(ps -Lo tid --pid $psid | grep -v TID) + $ do + $ chrt -r -p 1 $tid + $ done + $ done + +More information about Linux CFS can be found in: `Sched manual pages +`_. + + +**Host IRQ affinity** + +Changing the default pinning of every IRQ to core 0. (Same does apply on both +guest VM and host OS) + +:: + + $ for l in `ls /proc/irq`; do echo 1 | sudo tee /proc/irq/$l/smp_affinity; done + +**Host RCU affinity** + +Changing the default pinning of RCU to core 0. (Same does apply on both guest VM +and host OS) + +:: + + $ for i in `pgrep rcu[^c]` ; do sudo taskset -pc 0 $i ; done + +**Host Writeback affinity** + +Changing the default pinning of writebacks to core 0. 
(Same does apply on both +guest VM and host OS) + +:: + + $ echo 1 | sudo tee /sys/bus/workqueue/devices/writeback/cpumask + DUT Configuration - DPDK ------------------------ @@ -1916,13 +2149,13 @@ Tagged by **2T2C** .. code-block:: bash - testpmd -c 0x403 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=2 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=1 --txq=1 --burst=64 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start + testpmd -c 0x403 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=2 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=1 --txq=1 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start Tagged by **4T4C** .. code-block:: bash - testpmd -c 0xc07 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=4 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=2 --txq=2 --burst=64 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start + testpmd -c 0xc07 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=4 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=2 --txq=2 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start **L3FWD Startup Configuration** @@ -1976,7 +2209,12 @@ DPDK v17.05 - dest_mac : [0x3c,0xfd,0xfe,0x9c,0xee,0xf4] src_mac : [0x3c,0xfd,0xfe,0x9c,0xee,0xf5] +**TG Startup Command** + +:: + + $ sh -c 'cd /scripts/ && sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /dev/null 2>&1 &'> /dev/null + **TG common API - pointer to driver** `TRex driver`_ - diff --git a/docs/report/honeycomb_functional_tests/csit_release_notes.rst b/docs/report/honeycomb_functional_tests/csit_release_notes.rst index f36c7a78c0..a398fa41cf 100644 --- a/docs/report/honeycomb_functional_tests/csit_release_notes.rst +++ b/docs/report/honeycomb_functional_tests/csit_release_notes.rst @@ -6,12 +6,12 @@ Changes in CSIT |release| #. Added Honeycomb functional tests - - ACL plugin - - Routing - - SLAAC - - Proxy ARP - - DHCP Relay - - Neighbor Discovery Proxy + - ACL plugin + - Routing + - SLAAC + - Proxy ARP + - DHCP Relay + - Neighbor Discovery Proxy #. Changed execution environment from Ubuntu14.04 to Ubuntu16.04 diff --git a/docs/report/honeycomb_functional_tests/overview.rst b/docs/report/honeycomb_functional_tests/overview.rst index 9cf741e013..c73e9706f8 100644 --- a/docs/report/honeycomb_functional_tests/overview.rst +++ b/docs/report/honeycomb_functional_tests/overview.rst @@ -185,12 +185,12 @@ suites: #. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P** - - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline - routed forwarding, functional tests. + - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline + routed forwarding, functional tests. #. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC, P2V2P, NIC2VMchain2NIC, P2V2V2P** - - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of - Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost - interfaces and one VM, functional tests. + - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of + Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost + interfaces and one VM, functional tests. 
diff --git a/docs/report/index.rst b/docs/report/index.rst index 29f067bd8d..be7129cb79 100644 --- a/docs/report/index.rst +++ b/docs/report/index.rst @@ -10,6 +10,7 @@ CSIT 17.07 vpp_functional_tests/index honeycomb_functional_tests/index vpp_unit_tests/index + nsh_sfc_functional_tests/index detailed_test_results/index test_configuration/index test_operational_data/index diff --git a/docs/report/introduction/csit_design.png b/docs/report/introduction/csit_design.png new file mode 100644 index 0000000000..175c2f597a Binary files /dev/null and b/docs/report/introduction/csit_design.png differ diff --git a/docs/report/introduction/csit_design.rst b/docs/report/introduction/csit_design.rst new file mode 100644 index 0000000000..8c6c87f319 --- /dev/null +++ b/docs/report/introduction/csit_design.rst @@ -0,0 +1,178 @@ +CSIT Design +=========== + +FD.io CSIT system design needs to meet continuously expanding requirements of +FD.io projects including VPP, related sub-systems (e.g. plugin applications, +DPDK drivers) and FD.io applications (e.g. DPDK applications), as well as +growing number of compute platforms running those applications. With CSIT +project scope and charter including both FD.io continuous testing AND +performance trending/comparisons, those evolving requirements further amplify +the need for CSIT framework modularity, flexibility and usability. + +Design Hierarchy +---------------- + +CSIT follows a hierarchical system design with SUTs and DUTs at the bottom +level, and presentation level at the top level, with a number of functional +layers in-between. The current CSIT design including CSIT framework is depicted +in the diagram below. + +.. figure:: csit_design.png + :alt: FD.io CSIT system design + :align: center + + *Figure 1. FD.io CSIT system design* + +A brief bottom-up description is provided here: + +#. SUTs, DUTs, TGs: + + - SUTs - Systems Under Test + - DUTs - Devices Under Test + - TGs - Traffic Generators + +#. Level-1 libraries - Robot and Python: + + - Lowest level CSIT libraries abstracting underlying test environment, SUT, + DUT and TG specifics + - Used commonly across multiple L2 KWs + - Performance and functional tests: + + - L1 KWs (KeyWords) are implemented as RF libraries and Python + libraries + + - Performance TG L1 KWs: + + - All L1 KWs are implemented as Python libraries + + - Support for TRex only today + - Need to add IXIA + + - Performance data plane traffic profiles: + + - TG-specific stream profiles provide full control of: + + - Packet definition – layers, MACs, IPs, ports, combinations thereof + e.g. IPs and UDP ports + - Stream definitions - different streams can run together, delayed, + one after each other + - Stream profiles are independent of CSIT framework and can be used + in any T-rex setup, can be sent anywhere to repeat tests with + exactly the same setup + - Easily extensible – one can create a new stream profile that meets + tests requirements + - Same stream profile can be used for different tests with the same + traffic needs + + - Sunctional data plane traffic scripts: + + - Scapy specific traffic scripts + +#. Level-2 libraries - Robot resource files: + + - Higher level CSIT libraries abstracting required functions for executing + tests + - L2 KWs are classified into the following functional categories: + + - Configuration, test, verification, state report + - Suite setup, suite teardown + - Test setup, test teardown + +#. 
Tests - Robot: + + - Test suites with test cases; + - Functional tests using VIRL environment: + + - VPP + - HoneyComb + + - Performance tests using physical testbed environment: + + - VPP + - Testpmd + + - Tools: + + - Documentation generator + - Report generator + - Testbed environment setup ansible playbooks + - Operational debugging scripts + +Test Lifecycle Abstraction +-------------------------- + +A well coded test must follow a disciplined abstraction of the test lifecycles +that includes setup, configuration, test and verification. In addition to +improve test execution efficiency, the commmon aspects of test setup and +configuration shared across multiple test cases should be done only once. +Translating these high-level guidelines into the Robot Framework one arrives to +definition of a well coded RF tests for FD.io CSIT. +Anatomy of Good Tests for CSIT: + +#. Suite Setup - Suite startup Configuration common to all Test Cases in suite: + uses Configuration KWs, Verification KWs, StateReport KWs +#. Test Setup - Test startup Configuration common to multiple Test Cases: uses + Configuration KWs, StateReport KWs +#. Test Case - uses L2 KWs with RF Gherkin style: + + - prefixed with {Given} - Verification of Test setup, reading state: uses + Configuration KWs, Verification KWs, StateReport KWs + - prefixed with {When} - Test execution: Configuration KWs, Test KWs + - prefixed with {Then} - Verification of Test execution, reading state: uses + Verification KWs, StateReport KWs + +#. Test Teardown - post Test teardown with Configuration cleanup and + Verification common to multiple Test Cases - uses: Configuration KWs, + Verification KWs, StateReport KWs +#. Suite Teardown - Suite post-test Configuration cleanup: uses Configuration + KWs, Verification KWs, StateReport KWs + +RF Keywords Functional Classification +------------------------------------- + +CSIT RF KWs are classified into the functional categories matching the test +lifecycle events described earlier. All CSIT RF L2 and L1 KWs have been grouped +into the following functional categories: + +#. Configuration +#. Test +#. Verification +#. StateReport +#. SuiteSetup +#. TestSetup +#. SuiteTeardown +#. TestTeardown + +RF Keywords Naming Guidelines +----------------------------- + +Readability counts: "..code is read much more often than it is written." Hence +following a good and consistent grammar practice is important when writing RF +KeyWords and Tests. +All CSIT test cases are coded using Gherkin style and include only L2 KWs +references. L2 KWs are coded using simple style and include L2 KWs, L1 KWs, and +L1 python references. To improve readability, the proposal is to use the same +grammar for both RF KW styles, and to formalize the grammar of English sentences +used for naming the RF KWs. +RF KWs names are short sentences expressing functional description of the +command. They must follow English sentence grammar in one of the following +forms: + +#. **Imperative** - verb-object(s): *"Do something"*, verb in base form. +#. **Declarative** - subject–verb–object(s): *"Subject does something"*, verb in + a third-person singular present tense form. +#. **Affirmative** - modal_verb-verb-object(s): *"Subject should be something"*, + *"Object should exist"*, verb in base form. +#. **Negative** - modal_verb-Not-verb-object(s): *"Subject should not be + something"*, *"Object should not exist"*, verb in base form. + +Passive form MUST NOT be used. However a usage of past participle as an +adjective is okay. See usage examples. 
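+
+For a quick illustration, a few hypothetical keyword names matching the four
+grammar forms (made-up examples, not names copied from the CSIT keyword
+libraries):
+
+::
+
+    Imperative:   Configure IPv4 address on interface   (verb-object)
+    Declarative:  Honeycomb configures ACL chain         (subject-verb-object)
+    Affirmative:  Interface state should be Up           (modal verb, affirmative)
+    Negative:     Bridge domain should not exist         (modal verb, negated)
+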
+Following sections list applicability of the above grammar forms to different +RF KW categories. Usage examples are provided, both good and bad. + +Coding guidelines +----------------- + +Coding guidelines can be found on `Design optimizations wiki page +`_. diff --git a/docs/report/introduction/csit_test_naming.rst b/docs/report/introduction/csit_test_naming.rst index 682fcd941a..13eab06df5 100644 --- a/docs/report/introduction/csit_test_naming.rst +++ b/docs/report/introduction/csit_test_naming.rst @@ -19,26 +19,26 @@ Naming Convention The CSIT approach is to use tree naming convention and to encode following testing information into test suite and test case names: -1. packet network port configuration +#. packet network port configuration - * port type, physical or virtual; - * number of ports; - * NIC model, if applicable; - * port-NIC locality, if applicable; + * port type, physical or virtual; + * number of ports; + * NIC model, if applicable; + * port-NIC locality, if applicable; -2. packet encapsulations; +#. packet encapsulations; -3. VPP packet processing +#. VPP packet processing - * packet forwarding mode; - * packet processing function(s); + * packet forwarding mode; + * packet processing function(s); -4. packet forwarding path +#. packet forwarding path - * if present, network functions (processes, containers, VMs) and their - topology within the computer; + * if present, network functions (processes, containers, VMs) and their + topology within the computer; -5. main measured variable, type of test. +#. main measured variable, type of test. Proposed convention is to encode ports and NICs on the left (underlay), followed by outer-most frame header, then other stacked headers up to the @@ -58,61 +58,61 @@ topologies: 1. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P** - * *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType* - * *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on Intel - x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching with - MAC learning, NDR throughput discovery. - * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE on - Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline switching - with MAC learning, NDR throughput discovery. - * *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel x520 - NIC, IPv4 baseline routed forwarding, NDR throughput discovery. - * *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on Intel - x520 NIC, IPv6 scaled up routed forwarding, NDR throughput discovery. - * *10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot* => 2 ports of 10GE on - Intel x520 NIC, IPv4 baseline routed forwarding, ingress Access Control - Lists baseline matching on destination, NDR throughput discovery. - * *40ge2p1vic1385-ethip4-ip4base-ndrdisc.robot* => 2 ports of 40GE on Cisco - vic1385 NIC, IPv4 baseline routed forwarding, NDR throughput discovery. - * *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline - routed forwarding, functional tests. + * *PortNICConfig-WireEncapsulation-PacketForwardingFunction- + PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType* + * *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on Intel + x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching with + MAC learning, NDR throughput discovery. 
+ * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE on + Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline switching + with MAC learning, NDR throughput discovery. + * *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel x520 + NIC, IPv4 baseline routed forwarding, NDR throughput discovery. + * *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on Intel + x520 NIC, IPv6 scaled up routed forwarding, NDR throughput discovery. + * *10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot* => 2 ports of 10GE on + Intel x520 NIC, IPv4 baseline routed forwarding, ingress Access Control + Lists baseline matching on destination, NDR throughput discovery. + * *40ge2p1vic1385-ethip4-ip4base-ndrdisc.robot* => 2 ports of 40GE on Cisco + vic1385 NIC, IPv4 baseline routed forwarding, NDR throughput discovery. + * *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline + routed forwarding, functional tests. 2. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC, P2V2P, NIC2VMchain2NIC, P2V2V2P** - * *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation- - VirtPortConfig-VMconfig-TestType* - * *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports - of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain switching - to/from two vhost interfaces and one VM, NDR throughput discovery. - * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from two vhost interfaces and one VM, NDR throughput discovery. - * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from four vhost interfaces and two VMs, NDR throughput - discovery. - * *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of - Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost - interfaces and one VM, functional tests. + * *PortNICConfig-WireEncapsulation-PacketForwardingFunction- + PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation- + VirtPortConfig-VMconfig-TestType* + * *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports + of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain switching + to/from two vhost interfaces and one VM, NDR throughput discovery. + * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 + ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain + switching to/from two vhost interfaces and one VM, NDR throughput discovery. + * *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2 + ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain + switching to/from four vhost interfaces and two VMs, NDR throughput + discovery. + * *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of + Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost + interfaces and one VM, functional tests. 3. 
**API CRUD tests - Create (Write), Read (Retrieve), Update (Modify), Delete (Destroy) operations for configuration and operational data** - * *ManagementTestKeyword-ManagementOperation-ManagedFunction1-...- - ManagedFunctionN-ManagementAPI1-ManagementAPIN-TestType* - * *mgmt-cfg-lisp-apivat-func* => configuration of LISP with VAT API calls, - functional tests. - * *mgmt-cfg-l2bd-apihc-apivat-func* => configuration of L2 Bridge-Domain with - HoneyComb API and VAT API calls, functional tests. - * *mgmt-oper-int-apihcnc-func* => reading status and operational data of - interface with HoneyComb NetConf API calls, functional tests. - * *mgmt-cfg-int-tap-apihcnc-func* => configuration of tap interfaces with - HoneyComb NetConf API calls, functional tests. - * *mgmt-notif-int-subint-apihcnc-func* => notifications of interface and - sub-interface events with HoneyComb NetConf Notifications, functional tests. + * *ManagementTestKeyword-ManagementOperation-ManagedFunction1-...- + ManagedFunctionN-ManagementAPI1-ManagementAPIN-TestType* + * *mgmt-cfg-lisp-apivat-func* => configuration of LISP with VAT API calls, + functional tests. + * *mgmt-cfg-l2bd-apihc-apivat-func* => configuration of L2 Bridge-Domain with + HoneyComb API and VAT API calls, functional tests. + * *mgmt-oper-int-apihcnc-func* => reading status and operational data of + interface with HoneyComb NetConf API calls, functional tests. + * *mgmt-cfg-int-tap-apihcnc-func* => configuration of tap interfaces with + HoneyComb NetConf API calls, functional tests. + * *mgmt-notif-int-subint-apihcnc-func* => notifications of interface and + sub-interface events with HoneyComb NetConf Notifications, functional tests. For complete description of CSIT test naming convention please refer to `CSIT test naming wiki page `_. diff --git a/docs/report/introduction/general_notes.rst b/docs/report/introduction/general_notes.rst index d96dc00101..f91d96138f 100644 --- a/docs/report/introduction/general_notes.rst +++ b/docs/report/introduction/general_notes.rst @@ -41,9 +41,9 @@ is listed separately, as follows: functionality of VPP. Tests cover a range of CRUD operations executed against VPP. -#. **TLDK Tests** - TODO - -#. **NSH_SFC Tests** - TODO +#. **NSH_SFC Functional Tests** - NSH_SFC functional tests are executed in + virtual FD.io testbeds focusing onNSH_SFC of VPP. Tests cover a range of + CRUD operations executed against VPP. In addition to above, CSIT |release| report does also include VPP unit test results. VPP unit tests are developed within the FD.io VPP project and as they diff --git a/docs/report/introduction/index.rst b/docs/report/introduction/index.rst index 7c74f46cd1..eabbf7b7ae 100644 --- a/docs/report/introduction/index.rst +++ b/docs/report/introduction/index.rst @@ -5,5 +5,6 @@ Introduction overview general_notes + csit_design csit_test_naming csit_tag_description diff --git a/docs/report/introduction/overview.rst b/docs/report/introduction/overview.rst index 1356ca6795..7954686ad3 100644 --- a/docs/report/introduction/overview.rst +++ b/docs/report/introduction/overview.rst @@ -61,6 +61,13 @@ CSIT |release| report contains following main sections and sub-sections: *Overview* - short overview of unit test framework and executed tests; *Documentation* - source code documentation of VPP unit tests. +#. 
**NSH_SFC Functional Tests** - NSH_SFC functional tests executed in + virtual FD.io testbeds; *Overview* - tested virtual topologies, test + coverage and naming specifics; *CSIT Release Notes* - changes in CSIT + |release|, added tests, environment or methodology changes, known CSIT issues; + *Test Environment* - environment description ; + *Documentation* - source code documentation for NSH_SFC functional tests. + #. **Detailed Test Results** - auto-generated results from CSIT jobs executions using CSIT Robot Framework output files as source data; *VPP Performance Results*, *DPDK Performance Results*, *VPP Functional diff --git a/docs/report/nsh_sfc_functional_tests/csit_release_notes.rst b/docs/report/nsh_sfc_functional_tests/csit_release_notes.rst new file mode 100644 index 0000000000..6655a4538d --- /dev/null +++ b/docs/report/nsh_sfc_functional_tests/csit_release_notes.rst @@ -0,0 +1,18 @@ +CSIT Release Notes +================== + +Changes in CSIT |release| +------------------------- + +#. TODO + +Known Issues +------------ + +Here is the list of known issues in CSIT |release| for NSH_SFC functional tests in VIRL: + ++---+-------------------------------------------------+----------+------------------------------------------------------+ +| # | Issue | Jira ID | Description | ++---+-------------------------------------------------+----------+------------------------------------------------------+ +| 1 | TODO | CSIT-??? | TODO | ++---+-------------------------------------------------+----------+------------------------------------------------------+ diff --git a/docs/report/nsh_sfc_functional_tests/documentation.rst b/docs/report/nsh_sfc_functional_tests/documentation.rst new file mode 100644 index 0000000000..709f2a8a84 --- /dev/null +++ b/docs/report/nsh_sfc_functional_tests/documentation.rst @@ -0,0 +1,6 @@ +Documentation +============= + +`CSIT NSH_SFC Functional Tests Documentation`_ contains detailed +functional description and input parameters for each test case. + diff --git a/docs/report/nsh_sfc_functional_tests/index.rst b/docs/report/nsh_sfc_functional_tests/index.rst new file mode 100644 index 0000000000..c7678d26a0 --- /dev/null +++ b/docs/report/nsh_sfc_functional_tests/index.rst @@ -0,0 +1,10 @@ +NSH_SFC Functional Tests +======================== + +.. toctree:: + + overview + csit_release_notes + test_environment + documentation + diff --git a/docs/report/nsh_sfc_functional_tests/overview.rst b/docs/report/nsh_sfc_functional_tests/overview.rst new file mode 100644 index 0000000000..343e11dd83 --- /dev/null +++ b/docs/report/nsh_sfc_functional_tests/overview.rst @@ -0,0 +1,105 @@ +Overview +======== + +Tested Virtual Topologies +------------------------- + +CSIT NSH_SFC functional tests are executed on virtualized topologies created +using Virtual Internet Routing Lab (VIRL) simulation platform contributed by +Cisco. VIRL runs on physical baremetal servers hosted by LF FD.io project. +Majority,of the tests are executed in the three node logical test topology - +Traffic Generator (TG) node and two Systems Under Test (SUT) nodes connected in +a loop. Some tests use two node logical test topology - TG node and SUT1 node. +Both logical test topologies are shown in the figures below. 
+ +:: + + +------------------------+ +------------------------+ + | | | | + | +------------------+ | | +------------------+ | + | | <-----------------> | | + | | | | | | | | + | | DUT1 <-----------------> DUT2 | | + | +--^--^------------+ | | +------------^--^--+ | + | | | | | | | | + | | | SUT1 | | SUT2 | | | + +------------------------+ +------------------------+ + | | | | + | | | | + | | +-----------+ | | + | +---------------> <---------------+ | + | | TG | | + +------------------> <------------------+ + +-----------+ + + +------------------------+ + | | + | +------------------+ | + +---------------> <--------------+ + | | | | | | + | |------------> DUT1 <-----------+ | + | | | +------------------+ | | | + | | | | | | + | | | SUT1 | | | + | | +------------------------+ | | + | | | | + | | | | + | | +-----------+ | | + | +---------------> <---------------+ | + | | TG | | + +------------------> <------------------+ + +-----------+ + +SUT1 and SUT2 are two VMs (Ubuntu or Centos, depending on the test suite), TG +is a Traffic Generator (TG, another Ubuntu VM). SUTs run VPP SW application in +Linux user-mode as a Device Under Test (DUT) within the VM. TG runs Scapy SW +application as a packet Traffic Generator. Logical connectivity between SUTs +and to TG is provided using virtual NICs using VMs' virtio driver. + +Virtual testbeds are created on-demand whenever a verification job is started +(e.g. triggered by the gerrit patch submission) and destroyed upon completion +of all functional tests. Each node is a Virtual Machine and each connection +that is drawn on the diagram is available for use in any test case. During the +test execution, all nodes are reachable thru the Management network connected +to every node via dedicated virtual NICs and virtual links (not shown above +for clarity). + +For the test cases that require DUT (VPP) to communicate with VM over the +vhost-user interfaces, a nested VM is created on SUT1 and/or SUT2 for the +duration of these particular test cases only. DUT (VPP) test topology with VM +is shown in the figure below including the applicable packet flow thru the VM +(marked in the figure with ``***``). + +:: + + +------------------------+ +------------------------+ + | +----------+ | | +----------+ | + | | VM | | | | VM | | + | | ****** | | | | ****** | | + | +--^----^--+ | | +--^----^--+ | + | *| |* | | *| |* | + | +------v----v------+ | | +------v----v------+ | + | | * * |**|***********|**| * * | | + | | ***** *******<----------------->******* ***** | | + | | * DUT1 | | | | DUT2 * | | + | +--^---------------+ | | +---------------^--+ | + | *| | | |* | + | *| SUT1 | | SUT2 |* | + +------------------------+ +------------------^-----+ + *| |* + *| |* + *| +-----------+ |* + *| | | |* + *+------------------> TG <------------------+* + ******************* | |******************** + +-----------+ + +NSH_SFC Functional Tests Coverage +--------------------------------- + +Following NSH_SFC functional test areas are covered in the CSIT |release| with +results listed in this report: + +- TODO + + diff --git a/docs/report/nsh_sfc_functional_tests/test_environment.rst b/docs/report/nsh_sfc_functional_tests/test_environment.rst new file mode 100644 index 0000000000..79ca0e90c7 --- /dev/null +++ b/docs/report/nsh_sfc_functional_tests/test_environment.rst @@ -0,0 +1,440 @@ +Test Environment +================ + +CSIT NSH_SFC functional tests are currently executed in FD.IO VIRL testbed. 
The +physical VIRL testbed infrastructure consists of three identical VIRL hosts: + +- All hosts are Cisco UCS C240-M4 (2x Intel(R) Xeon(R) CPU E5-2699 v3 @2.30GHz, + 18c, 512GB RAM) +- Hosts run Ubuntu 14.04.3 +- VIRL software versions: + + - STD server version 0.10.24.7 + - UWM server version 0.10.24.7 + +The VIRL host to run VIRL simulation is selected based on least load algorithm +per VIRL simulation. + +Every VIRL simulation uses the same three-node - Traffic Generator (TG node) and +two Systems Under Test (SUT1 and SUT2) - "double-ring" topology. The appropriate +pre-built VPP packages built by Jenkins for the patch under review are then +installed on the two SUTs, along with their /etc/vpp/startup.conf file, in all +VIRL simulations. + +SUT Configuration - VIRL Guest VM +--------------------------------- + +Configuration of the SUT VMs is defined in file + + /csit/resources/tools/virl/topologies/double-ring-nested.xenial.virl + +- List of SUT VM interfaces::: + + + + + + +- Number of 2MB hugepages: 1024 + +- Maximum number of memory map areas: 20000 + +- Kernel Shared Memory Max: 2147483648 (vm.nr_hugepages * 2 * 1024 * 1024) + +SUT Configuration - VIRL Guest OS Linux +--------------------------------------- + +In CSIT terminology, the VM operating system for both SUTs that |vpp-release| has +been tested with, is the following: + +**#. Ubuntu VIRL image** + +This image implies Ubuntu 16.04.1 LTS, current as of yyyy-mm-dd (that is, +package versions are those that would have been installed by a "apt-get update", +"apt-get upgrade" on that day), produced by CSIT disk image build scripts. + +The exact list of installed packages and their versions (including the Linux +kernel package version) are included in CSIT source repository: + + resources/tools/disk-image-builder/ubuntu/lists/|virl-image-ubuntu| + +A replica of this VM image can be built by running the "build.sh" script in CSIT +repository resources/tools/disk-image-builder/ubuntu. + +**#. CentOS VIRL image** + +The Centos7.3 image is ready to be used but no tests running on it now. +Corresponding Jenkins jobs are under preparation. + +The exact list of installed packages and their versions (including the Linux +kernel package version) are included in CSIT source repository: + + resources/tools/disk-image-builder/ubuntu/lists/|virl-image-centos| + +A replica of this VM image can be built by running the "build.sh" script in CSIT +repository resources/tools/disk-image-builder/centos. + +**#. Nested VM image** + +In addition to the "main" VM image, tests which require VPP to communicate to a +VM over a vhost-user interface, utilize a "nested" VM image. + +This "nested" VM is dynamically created and destroyed as part of a test case, +and therefore the "nested" VM image is optimized to be small, lightweight and +have a short boot time. The "nested" VM image is not built around any +established Linux distribution, but is based on `BuildRoot +`_, a tool for building embedded Linux systems. Just as +for the "main" image, scripts to produce an identical replica of the "nested" +image are included in CSIT GIT repository, and the image can be rebuilt using +the "build.sh" script at: + + resources/tools/disk-image-builder/ubuntu/lists/nested + +DUT Configuration - VPP +----------------------- + +Every System Under Test runs VPP SW application in Linux user-mode as a Device +Under Test (DUT) node. 
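+
+A quick manual sanity check that VPP is installed and running on an SUT can be
+done from the shell; this is an illustrative sketch (assuming the standard
+``vpp`` service name and the ``vppctl`` binary shipped by the packages listed
+below), not a step of the automated tests:
+
+::
+
+    $ service vpp status
+    $ sudo vppctl show version
+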
+ +**DUT port configuration** + +Port configuration of DUTs is defined in topology file that is generated per +VIRL simulation based on the definition stored in file + + /csit/resources/tools/virl/topologies/double-ring-nested.xenial.yaml + +Example of DUT nodes configuration::: + + DUT1: + type: DUT + host: "10.30.51.157" + port: 22 + username: cisco + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + priv_key: | + -----BEGIN RSA PRIVATE KEY----- + MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb + v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd + vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2 + 4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc + 1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs + 8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7 + 0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN + EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN + 0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU + MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD + p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW + 79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG + OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD + GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef + bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg + 8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu + f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u + UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/ + +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT + D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/ + sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn + g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY + qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8 + EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD + BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN + -----END RSA PRIVATE KEY----- + + interfaces: + port1: + mac_address: "fa:16:3e:9b:89:52" + pci_address: "0000:00:04.0" + link: link1 + port2: + mac_address: "fa:16:3e:7a:33:60" + pci_address: "0000:00:05.0" + link: link4 + port3: + mac_address: "fa:16:3e:29:b7:ae" + pci_address: "0000:00:06.0" + link: link3 + port4: + mac_address: "fa:16:3e:76:8d:ff" + pci_address: "0000:00:07.0" + link: link6 + DUT2: + type: DUT + host: "10.30.51.156" + port: 22 + username: cisco + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + priv_key: | + -----BEGIN RSA PRIVATE KEY----- + MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb + v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd + vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2 + 4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc + 1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs + 8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7 + 0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN + EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN + 0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU + MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD + p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW + 79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG + 
OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD + GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef + bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg + 8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu + f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u + UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/ + +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT + D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/ + sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn + g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY + qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8 + EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD + BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN + -----END RSA PRIVATE KEY----- + + interfaces: + port1: + mac_address: "fa:16:3e:ad:6c:7d" + pci_address: "0000:00:04.0" + link: link2 + port2: + mac_address: "fa:16:3e:94:a4:99" + pci_address: "0000:00:05.0" + link: link5 + port3: + mac_address: "fa:16:3e:75:92:da" + pci_address: "0000:00:06.0" + link: link3 + port4: + mac_address: "fa:16:3e:2c:b1:2a" + pci_address: "0000:00:07.0" + link: link6 + +**VPP Version** + +|vpp-release| + +**VPP Installed Packages** +:: + + $ dpkg -l vpp\* + Desired=Unknown/Install/Remove/Purge/Hold + | Status=Not/Inst/Conf-files/Unpacked/halF-conf/Half-inst/trig-aWait/Trig-pend + |/ Err?=(none)/Reinst-required (Status,Err: uppercase=bad) + ||/ Name Version Architecture Description + +++-==============-=============-============-============================================= + ii vpp 17.07-release amd64 Vector Packet Processing--executables + ii vpp-dbg 17.07-release amd64 Vector Packet Processing--debug symbols + ii vpp-dev 17.07-release amd64 Vector Packet Processing--development support + ii vpp-dpdk-dev 17.07-release amd64 Vector Packet Processing--development support + ii vpp-dpdk-dkms 17.07-release amd64 DPDK 2.1 igb_uio_driver + ii vpp-lib 17.07-release amd64 Vector Packet Processing--runtime libraries + ii vpp-plugins 17.07-release amd64 Vector Packet Processing--runtime plugins + +**VPP Startup Configuration** + +VPP startup configuration is common for all test cases. + +:: + + $ cat /etc/vpp/startup.conf + unix { + nodaemon + log /tmp/vpp.log + full-coredump + } + + api-trace { + on + } + + api-segment { + gid vpp + } + + cpu { + ## In the VPP there is one main thread and optionally the user can create worker(s) + ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically + + ## Manual pinning of thread(s) to CPU core(s) + + ## Set logical CPU core where main thread runs + # main-core 1 + + ## Set logical CPU core(s) where worker threads are running + # corelist-workers 2-3,18-19 + + ## Automatic pinning of thread(s) to CPU core(s) + + ## Sets number of CPU core(s) to be skipped (1 ... N-1) + ## Skipped CPU core(s) are not used for pinning main thread and working thread(s). 
+ ## The main thread is automatically pinned to the first available CPU core and worker(s) + ## are pinned to next free CPU core(s) after core assigned to main thread + # skip-cores 4 + + ## Specify a number of workers to be created + ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s) + ## and main thread's CPU core + # workers 2 + + ## Set scheduling policy and priority of main and worker threads + + ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH) + ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR) + # scheduler-policy fifo + + ## Scheduling priority is used only for "real-time policies (fifo and rr), + ## and has to be in the range of priorities supported for a particular policy + # scheduler-priority 50 + } + + dpdk { + ## Change default settings for all intefaces + # dev default { + ## Number of receive queues, enables RSS + ## Default is 1 + # num-rx-queues 3 + + ## Number of transmit queues, Default is equal + ## to number of worker threads or 1 if no workers treads + # num-tx-queues 3 + + ## Number of descriptors in transmit and receive rings + ## increasing or reducing number can impact performance + ## Default is 1024 for both rx and tx + # num-rx-desc 512 + # num-tx-desc 512 + + ## VLAN strip offload mode for interface + ## Default is off + # vlan-strip-offload on + # } + + ## Whitelist specific interface by specifying PCI address + # dev 0000:02:00.0 + + ## Whitelist specific interface by specifying PCI address and in + ## addition specify custom parameters for this interface + # dev 0000:02:00.1 { + # num-rx-queues 2 + # } + + ## Change UIO driver used by VPP, Options are: uio_pci_generic, vfio-pci + ## and igb_uio (default) + # uio-driver uio_pci_generic + + ## Disable mutli-segment buffers, improves performance but + ## disables Jumbo MTU support + # no-multi-seg + + ## Increase number of buffers allocated, needed only in scenarios with + ## large number of interfaces and worker threads. Value is per CPU socket. + ## Default is 32768 + # num-mbufs 128000 + + ## Change hugepages allocation per-socket, needed only if there is need for + ## larger number of mbufs. Default is 256M on each detected CPU socket + # socket-mem 2048,2048 + } + +TG Configuration +---------------- + +Traffic Generator node is VM running the same OS Linux as SUTs. Ports of this +VM are used as source (Tx) and destination (Rx) ports for the traffic. + +Traffic scripts of test cases are executed on this VM. 
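+
+The TG VM traffic ports are virtio network devices; they can be cross-checked
+against the PCI addresses listed below using standard Linux tooling
+(illustrative commands only, not part of the test framework):
+
+::
+
+    $ lspci -nn | grep -i "virtio network"
+    $ ip -o link show
+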
+ +**TG VM configuration** + +Configuration of the TG VMs is defined in file + + /csit/resources/tools/virl/topologies/double-ring-nested.xenial.virl + +- List of TG VM interfaces::: + + + + + + + + +**TG node port configuration** + +Port configuration of TG is defined in topology file that is generated per VIRL +simulation based on the definition stored in file + + /csit/resources/tools/virl/topologies/double-ring-nested.xenial.yaml + +Example of TG node configuration::: + + TG: + type: TG + host: "10.30.51.155" + port: 22 + username: cisco + priv_key: | + -----BEGIN RSA PRIVATE KEY----- + MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb + v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd + vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2 + 4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc + 1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs + 8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7 + 0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN + EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN + 0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU + MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD + p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW + 79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG + OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD + GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef + bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg + 8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu + f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u + UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/ + +m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT + D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/ + sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn + g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY + qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8 + EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD + BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN + -----END RSA PRIVATE KEY----- + + interfaces: + port3: + mac_address: "fa:16:3e:b9:e1:27" + pci_address: "0000:00:06.0" + link: link1 + driver: virtio-pci + port4: + mac_address: "fa:16:3e:e9:c8:68" + pci_address: "0000:00:07.0" + link: link4 + driver: virtio-pci + port5: + mac_address: "fa:16:3e:e8:d3:47" + pci_address: "0000:00:08.0" + link: link2 + driver: virtio-pci + port6: + mac_address: "fa:16:3e:cf:ca:58" + pci_address: "0000:00:09.0" + link: link5 + driver: virtio-pci + +**Traffic generator** + +Functional tests utilize Scapy as a traffic generator. There was used Scapy +v2.3.1 for |vpp-release| tests. + diff --git a/docs/report/vpp_functional_tests/csit_release_notes.rst b/docs/report/vpp_functional_tests/csit_release_notes.rst index 4b682209d6..afd2a68cfa 100644 --- a/docs/report/vpp_functional_tests/csit_release_notes.rst +++ b/docs/report/vpp_functional_tests/csit_release_notes.rst @@ -6,19 +6,19 @@ Changes in CSIT |release| #. VPP functional test environment changes - - Implemented VAT command history collection for every test case as part of teardown. - - Introduction of Centos7 tests in VIRL environment. + - Implemented VAT command history collection for every test case as part of teardown. + - Introduction of Centos7 tests in VIRL environment. #. 
VPP functional test framework changes

-  - Added VAT command history collection for every test case as part of teardown.
+  - Added VAT command history collection for every test case as part of teardown.

 #. Added VPP functional tests

-  - IPv4 routed-forwarding with dot1q VLAN sub-interfaces.
-  - L2BD switched-forwarding with dot1q VLAN sub-interfaces and vhost-user to VM.
-  - IPv4 routed-forwarding with vhost-user interfaces to VM.
-  - Vhost-user interface re-connect tests.
+  - IPv4 routed-forwarding with dot1q VLAN sub-interfaces.
+  - L2BD switched-forwarding with dot1q VLAN sub-interfaces and vhost-user to VM.
+  - IPv4 routed-forwarding with vhost-user interfaces to VM.
+  - Vhost-user interface re-connect tests.

 Known Issues
 ------------
diff --git a/docs/report/vpp_functional_tests/overview.rst b/docs/report/vpp_functional_tests/overview.rst
index a6de3f3282..e8f8460d34 100644
--- a/docs/report/vpp_functional_tests/overview.rst
+++ b/docs/report/vpp_functional_tests/overview.rst
@@ -135,20 +135,20 @@
 CSIT |release| follows a common structured naming convention for all
 performance and system functional tests, introduced in CSIT |release-1|.

 The naming should be intuitive for the majority of the tests. Complete
-description of CSIT test naming convention is provided on `CSIT test naming
-page `_.
+description of CSIT test naming convention is provided on
+`CSIT test naming wiki page `_.

 Here are a few illustrative examples of the new naming usage for functional test
 suites:

 #. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P**

-  - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline
-    routed forwarding, functional tests.
+  - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline
+    routed forwarding, functional tests.

 #. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC,
    P2V2P, NIC2VMchain2NIC, P2V2V2P**

-  - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of
-    Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost
-    interfaces and one VM, functional tests.
+  - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of
+    Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost
+    interfaces and one VM, functional tests.
diff --git a/docs/report/vpp_functional_tests/test_environment.rst b/docs/report/vpp_functional_tests/test_environment.rst
index 8add53189c..bd57558667 100644
--- a/docs/report/vpp_functional_tests/test_environment.rst
+++ b/docs/report/vpp_functional_tests/test_environment.rst
@@ -16,7 +16,7 @@
 Whenever a patch is submitted to gerrit for review, parallel VIRL simulations
 are started to reduce the time of execution of all functional tests. The number
 of parallel VIRL simulations is equal to the number of test groups defined by
 TEST_GROUPS variable in csit/bootstrap.sh file. The VIRL host to run VIRL
-simulation is selected randomly per VIRL simulation.
+simulation is selected based on a least-load algorithm per VIRL simulation.

 Every VIRL simulation uses the same three-node - Traffic Generator (TG node) and
 two Systems Under Test (SUT1 and SUT2) - "double-ring" topology. The appropriate
@@ -50,7 +50,7 @@ SUT Configuration - VIRL Guest OS Linux
 In CSIT terminology, the VM operating system for both SUTs that |vpp-release|
 has been tested with, is the following:

-**#. |virl-image-ubuntu|**
+**#. 
Ubuntu VIRL image** This image implies Ubuntu 16.04.1 LTS, current as of yyyy-mm-dd (that is, package versions are those that would have been installed by a "apt-get update", @@ -64,7 +64,7 @@ kernel package version) are included in CSIT source repository: A replica of this VM image can be built by running the "build.sh" script in CSIT repository resources/tools/disk-image-builder/ubuntu. -**#. |virl-image-centos|** +**#. CentOS VIRL image** The Centos7.3 image is ready to be used but no tests running on it now. Corresponding Jenkins jobs are under preparation. @@ -85,8 +85,8 @@ VM over a vhost-user interface, utilize a "nested" VM image. This "nested" VM is dynamically created and destroyed as part of a test case, and therefore the "nested" VM image is optimized to be small, lightweight and have a short boot time. The "nested" VM image is not built around any -established Linux distribution, but is based on BuildRoot -(https://buildroot.org/), a tool for building embedded Linux systems. Just as +established Linux distribution, but is based on `BuildRoot +`_, a tool for building embedded Linux systems. Just as for the "main" image, scripts to produce an identical replica of the "nested" image are included in CSIT GIT repository, and the image can be rebuilt using the "build.sh" script at: @@ -146,7 +146,7 @@ Example of DUT nodes configuration::: EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN -----END RSA PRIVATE KEY----- - + interfaces: port1: mac_address: "fa:16:3e:9b:89:52" @@ -202,7 +202,7 @@ Example of DUT nodes configuration::: EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN -----END RSA PRIVATE KEY----- - + interfaces: port1: mac_address: "fa:16:3e:ad:6c:7d" @@ -245,6 +245,7 @@ Example of DUT nodes configuration::: **VPP Startup Configuration** VPP startup configuration is common for all test cases. + :: $ cat /etc/vpp/startup.conf @@ -253,95 +254,95 @@ VPP startup configuration is common for all test cases. log /tmp/vpp.log full-coredump } - + api-trace { on } - + api-segment { gid vpp } - + cpu { ## In the VPP there is one main thread and optionally the user can create worker(s) ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically - + ## Manual pinning of thread(s) to CPU core(s) - + ## Set logical CPU core where main thread runs # main-core 1 - + ## Set logical CPU core(s) where worker threads are running # corelist-workers 2-3,18-19 - + ## Automatic pinning of thread(s) to CPU core(s) - + ## Sets number of CPU core(s) to be skipped (1 ... N-1) ## Skipped CPU core(s) are not used for pinning main thread and working thread(s). 
## The main thread is automatically pinned to the first available CPU core and worker(s) ## are pinned to next free CPU core(s) after core assigned to main thread # skip-cores 4 - + ## Specify a number of workers to be created ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s) ## and main thread's CPU core # workers 2 - + ## Set scheduling policy and priority of main and worker threads - + ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH) ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR) # scheduler-policy fifo - + ## Scheduling priority is used only for "real-time policies (fifo and rr), ## and has to be in the range of priorities supported for a particular policy # scheduler-priority 50 } - + dpdk { ## Change default settings for all intefaces # dev default { ## Number of receive queues, enables RSS ## Default is 1 # num-rx-queues 3 - + ## Number of transmit queues, Default is equal ## to number of worker threads or 1 if no workers treads # num-tx-queues 3 - + ## Number of descriptors in transmit and receive rings ## increasing or reducing number can impact performance ## Default is 1024 for both rx and tx # num-rx-desc 512 # num-tx-desc 512 - + ## VLAN strip offload mode for interface ## Default is off # vlan-strip-offload on # } - + ## Whitelist specific interface by specifying PCI address # dev 0000:02:00.0 - + ## Whitelist specific interface by specifying PCI address and in ## addition specify custom parameters for this interface # dev 0000:02:00.1 { # num-rx-queues 2 # } - + ## Change UIO driver used by VPP, Options are: uio_pci_generic, vfio-pci ## and igb_uio (default) # uio-driver uio_pci_generic - + ## Disable mutli-segment buffers, improves performance but ## disables Jumbo MTU support # no-multi-seg - + ## Increase number of buffers allocated, needed only in scenarios with ## large number of interfaces and worker threads. Value is per CPU socket. ## Default is 32768 # num-mbufs 128000 - + ## Change hugepages allocation per-socket, needed only if there is need for ## larger number of mbufs. Default is 256M on each detected CPU socket # socket-mem 2048,2048 @@ -360,7 +361,7 @@ Traffic scripts of test cases are executed on this VM. Configuration of the TG VMs is defined in file /csit/resources/tools/virl/topologies/double-ring-nested.xenial.virl - + - List of TG VM interfaces::: @@ -412,7 +413,7 @@ Example of TG node configuration::: EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN -----END RSA PRIVATE KEY----- - + interfaces: port3: mac_address: "fa:16:3e:b9:e1:27" diff --git a/docs/report/vpp_performance_tests/csit_release_notes.rst b/docs/report/vpp_performance_tests/csit_release_notes.rst index 725b40bee2..ff992df896 100644 --- a/docs/report/vpp_performance_tests/csit_release_notes.rst +++ b/docs/report/vpp_performance_tests/csit_release_notes.rst @@ -6,44 +6,34 @@ Changes in CSIT |release| #. VPP performance test environment changes - - Further optimizations of VM and vhost-user test environment - Qemu virtio - queue size increased from default value of 256 to 1024. - - Addition of HW cryptodev devices - Intel QAT 8950 50G - in all three - LF FD.io physical testbeds. + - Further optimizations of VM and vhost-user test environment - various + Qemu virtio queue size testing with value of 256 and 1024. Applied + Linux CFS optimization to run VPP worker threads and Qemu worker threads + with highest priority. #. 
VPP performance test framework changes - - Added VAT command history collection for every test case as part of - teardown. + - Full code review, optimization and refactor. -#. Added VPP performance tests - - - **CGNAT** - - - Carrier Grade Network Address Translation tests with varying number - of users and ports per user: 1u-15p, 10u-15p, 100u-15p, 1000u-15p, - 2000u-15p, 4000u-15p - with Intel x520 NIC. +#. T-rex changes - - **vhost-user tests with one VM** + - Full refactor of T-rex driver and introduce of traffic profiles that + improves readability, manageability of traffic profiles for various + test scenarios. - - L2 Bridge Domain switched-forwarding with Intel x710 NIC, Intel x520 - NIC, Intel xl710 NIC. - - VXLAN and L2 Bridge Domain switched-forwarding with Intel x520 NIC. +#. Added VPP performance tests - - **vhost-user tests with two VMs service chain** + - **LXC memif** - - L2 cross-connect switched-forwarding with Intel x520 NIC, Intel xl710 - NIC. - - L2 Bridge Domain switched-forwarding with Intel x520 NIC, Intel xl710 - NIC. - - IPv4 routed-forwarding with Intel x520 NIC, Intel xl710 NIC. + - Memif interface tests interconnecting two VPP instances on single SUT. + Master VPP instance running on native OS with Intel x520 NIC and guest + VPP instance running in Linux Container (LXC) doing the L2 cross + connect loop. LXC running in privileged mode is pinned to dedicated + cores. All VPP instances are same version. - - **IPSec encryption with** + - **Stateful Security Groups** - - AES-GCM, CBC-SHA1 ciphers, in combination with IPv4 routed-forwarding - with Intel xl710 NIC. - - CBC-SHA1 ciphers, in combination with LISP-GPE overlay tunneling for - IPv4-over-IPv4 with Intel xl710 NIC. + - **VM vhost use cases** Performance Improvements ------------------------ @@ -59,62 +49,56 @@ NDR Throughput Non-Drop Rate Throughput discovery tests: -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | 17.01 to 17.04 | -| | | [Mpps] | [Mpps] | [Mpps] | Relative Change | -+===================+=================================================================+============+===========+===========+=================+ -| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc | 0.5 | 2.8 | 3.4 | 21% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc | 0.4 | 2.7 | 3.1 | 15% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc | 0.3 | 2.6 | 3.0 | 15% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-ndrdisc | 4.4 | 4.8 | 5.5 | 15% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-ndrdisc | 3.0 | 7.3 | 8.1 | 11% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 COP | 10ge2p1x520: 
78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc | 6.1 | 6.1 | 6.9 | 13% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc | 6.5 | 6.1 | 6.9 | 13% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-ndrdisc | 5.3 | 4.2 | 4.6 | 10% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | VPP-17.07 | 17.04 to 17.07 | +| | | [Mpps] | [Mpps] | [Mpps] | [Mpps] | Relative Change | ++===================+=================================================================+===========+===========+===========+===========+=================+ +| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc | 0.5 | 2.8 | 3.4 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc | 0.4 | 2.7 | 3.1 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc | 0.3 | 2.6 | 3.0 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-ndrdisc | 4.4 | 4.8 | 5.5 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-ndrdisc | 3.0 | 7.3 | 8.1 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc | 6.1 | 6.1 | 6.9 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc | 6.5 | 6.1 | 6.9 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-ndrdisc | 5.3 | 4.2 | 4.6 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ PDR Throughput ~~~~~~~~~~~~~~ Partial Drop Rate thoughput discovery tests with packet Loss Tolerance of 0.5%: -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | 17.01 to 17.04 | -| | | [Mpps] | 
[Mpps] | [Mpps] | Relative Change | -+===================+=================================================================+===========+===========+===========+=================+ -| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc | 2.6 | 3.2 | 3.7 | 15% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc | 2.1 | 2.9 | 3.3 | 14% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc | 2.0 | 2.7 | 3.0 | 11% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-pdrdisc | 4.6 | 4.8 | 5.5 | 15% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-pdrdisc | 7.7 | 7.3 | 8.1 | 11% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc | 6.1 | 6.1 | 6.9 | 13% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc | 6.5 | 6.1 | 6.9 | 13% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-pdrdisc | 5.3 | 4.2 | 4.6 | 10% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | VPP-17.07 | 17.04 to 17.07 | +| | | [Mpps] | [Mpps] | [Mpps] | [Mpps] | Relative Change | ++===================+=================================================================+===========+===========+===========+===========+=================+ +| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc | 2.6 | 3.2 | 3.7 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc | 2.1 | 2.9 | 3.3 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc | 2.0 | 2.7 | 3.0 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-pdrdisc | 4.6 | 4.8 | 5.5 | | ??% | 
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-pdrdisc | 7.7 | 7.3 | 8.1 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc | 6.1 | 6.1 | 6.9 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc | 6.5 | 6.1 | 6.9 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-pdrdisc | 5.3 | 4.2 | 4.6 | | ??% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ Measured improvements are in line with VPP code optimizations listed in -`VPP-17.04 release notes -`_. - -Additionally, vhost-VM performance improvements are due to both VPP code -optimizations as well as due to the FD.io CSIT Linux KVM test environment -optimizations for vhost-VM tests - see section "2.1.7. Methodology: KVM VM -vhost". - +`VPP-17.07 release notes +`_. Other Performance Changes ------------------------- @@ -129,78 +113,78 @@ NDR Throughput Non-Drop Rate Throughput discovery tests: -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | 17.01 to 17.04 | -| | | [Mpps] | [Mpps] | [Mpps] | Relative Change | -+===================+=================================================================+============+===========+===========+=================+ -| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.4 | 12.7 | 13.1 | 3% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2XC | 10ge2p1xl710: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.5 | 12.2 | 12.4 | 2% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-ndrdisc | 7.4 | 8.8 | 9.3 | 6% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-ndrdisc | 7.5 | 8.8 | 9.2 | 5% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc | 5.4 | 6.5 | 6.8 | 5% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-ndrdisc | 7.8 | 10.4 | 10.8 | 4% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ndrdisc | 8.7 | 9.7 | 10.6 | 9% | 
-+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc | 7.1 | 8.3 | 9.0 | 8% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc | 6.9 | 7.6 | 8.3 | 9% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-ndrdisc | 8.5 | 9.0 | 9.7 | 8% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-ndrdisc | 8.5 | 9.0 | 9.7 | 8% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-ndrdisc | 8.5 | 7.8 | 8.1 | 4% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc | 6.9 | 7.4 | 8.1 | 9% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-ndrdisc | 6.5 | 5.3 | 5.3 | 0% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ -| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-ndrdisc | 6.9 | 6.5 | 6.9 | 6% | -+-------------------+-----------------------------------------------------------------+------------+-----------+-----------+-----------------+ ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | VPP-17.07 | 17.04 to 17.07 | +| | | [Mpps] | [Mpps] | [Mpps] | [Mpps] | Relative Change | ++===================+=================================================================+===========+===========+===========+===========+=================+ +| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.4 | 12.7 | 13.1 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC | 10ge2p1xl710: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.5 | 12.2 | 12.4 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-ndrdisc | 7.4 | 8.8 | 9.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-ndrdisc | 7.5 | 8.8 | 9.2 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC VxLAN | 10ge2p1x520: 
64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc | 5.4 | 6.5 | 6.8 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-ndrdisc | 7.8 | 10.4 | 10.8 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ndrdisc | 8.7 | 9.7 | 10.6 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc | 7.1 | 8.3 | 9.0 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc | 6.9 | 7.6 | 8.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-ndrdisc | 8.5 | 9.0 | 9.7 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-ndrdisc | 8.5 | 9.0 | 9.7 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-ndrdisc | 8.5 | 7.8 | 8.1 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc | 6.9 | 7.4 | 8.1 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-ndrdisc | 6.5 | 5.3 | 5.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-ndrdisc | 6.9 | 6.5 | 6.9 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ PDR Throughput ~~~~~~~~~~~~~~ Partial Drop Rate thoughput discovery tests with packet Loss Tolerance of 0.5%: -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | 17.01 to 17.04 | -| | | [Mpps] | [Mpps] | [Mpps] | Relative Change | -+===================+=================================================================+===========+===========+===========+=================+ -| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-pdrdisc | 9.4 | 12.7 | 13.4 | 6% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-pdrdisc | 7.4 | 8.8 | 
9.3 | 6% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-pdrdisc | 7.5 | 8.8 | 9.2 | 5% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc | 5.4 | 6.5 | 6.8 | 5% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-pdrdisc | 7.8 | 10.6 | 10.8 | 2% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-pdrdisc | 8.7 | 9.7 | 10.6 | 9% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc | 7.1 | 8.3 | 9.2 | 11% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc | 7.1 | 7.6 | 8.3 | 9% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-pdrdisc | 8.5 | 9.0 | 9.7 | 8% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-pdrdisc | 8.5 | 9.0 | 9.7 | 8% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-pdrdisc | 8.3 | 8.1 | 8.1 | 0% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc | 7.1 | 7.4 | 8.1 | 9% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-pdrdisc | 6.9 | 5.3 | 5.3 | 0% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ -| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-pdrdisc | 6.9 | 6.5 | 6.9 | 6% | -+-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------------+ ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | VPP-17.07 | 17.04 to 17.07 | +| | | [Mpps] | [Mpps] | [Mpps] | [Mpps] | Relative Change | ++===================+=================================================================+===========+===========+===========+===========+=================+ +| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-pdrdisc | 9.4 | 12.7 | 13.4 | | ?% | 
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-pdrdisc | 7.4 | 8.8 | 9.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-pdrdisc | 7.5 | 8.8 | 9.2 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc | 5.4 | 6.5 | 6.8 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-pdrdisc | 7.8 | 10.6 | 10.8 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-pdrdisc | 8.7 | 9.7 | 10.6 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc | 7.1 | 8.3 | 9.2 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc | 7.1 | 7.6 | 8.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-pdrdisc | 8.5 | 9.0 | 9.7 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-pdrdisc | 8.5 | 9.0 | 9.7 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-pdrdisc | 8.3 | 8.1 | 8.1 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc | 7.1 | 7.4 | 8.1 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-pdrdisc | 6.9 | 5.3 | 5.3 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ +| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-pdrdisc | 6.9 | 6.5 | 6.9 | | ?% | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-----------+-----------------+ Known Issues ------------ diff --git a/docs/report/vpp_performance_tests/overview.rst b/docs/report/vpp_performance_tests/overview.rst index ccf8063ec2..a28d8572eb 
100644 --- a/docs/report/vpp_performance_tests/overview.rst +++ b/docs/report/vpp_performance_tests/overview.rst @@ -104,7 +104,8 @@ sensitivity to Linux kernel scheduler settings and behaviour, this estimation may not always yield good enough accuracy. For detailed LF FD.io test bed specification and physical topology please refer -to `LF FDio CSIT testbed wiki page `_. +to `LF FDio CSIT testbed wiki page +`_. Performance Tests Coverage -------------------------- @@ -205,38 +206,38 @@ suites: #. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P** - - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType* - - *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on - Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching - with MAC learning, NDR throughput discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE - on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline - switching with MAC learning, NDR throughput discovery. - - *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel - x520 NIC, IPv4 baseline routed forwarding, NDR throughput discovery. - - *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on - Intel x520 NIC, IPv6 scaled up routed forwarding, NDR throughput - discovery. + - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- + PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType* + - *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on + Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching + with MAC learning, NDR throughput discovery. + - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE + on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline + switching with MAC learning, NDR throughput discovery. + - *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel + x520 NIC, IPv4 baseline routed forwarding, NDR throughput discovery. + - *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on + Intel x520 NIC, IPv6 scaled up routed forwarding, NDR throughput + discovery. #. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC, P2V2P, NIC2VMchain2NIC, P2V2V2P** - - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation- - VirtPortConfig-VMconfig-TestType* - - *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports - of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain - switching to/from two vhost interfaces and one VM, NDR throughput - discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from two vhost interfaces and one VM, NDR throughput - discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from four vhost interfaces and two VMs, NDR throughput - discovery. 
+ - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- + PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation- + VirtPortConfig-VMconfig-TestType* + - *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports + of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain + switching to/from two vhost interfaces and one VM, NDR throughput + discovery. + - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 + ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain + switching to/from two vhost interfaces and one VM, NDR throughput + discovery. + - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2 + ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain + switching to/from four vhost interfaces and two VMs, NDR throughput + discovery. Methodology: Multi-Thread and Multi-Core ---------------------------------------- @@ -356,29 +357,29 @@ specific configuration. TRex is installed and run on the TG compute node. The typical procedure is: - - If the TRex is not already installed on TG, it is installed in the - suite setup phase - see `TRex intallation`_. - - TRex configuration is set in its configuration file - :: +- If the TRex is not already installed on TG, it is installed in the + suite setup phase - see `TRex intallation`_. +- TRex configuration is set in its configuration file + :: - /etc/trex_cfg.yaml + /etc/trex_cfg.yaml - - TRex is started in the background mode - :: +- TRex is started in the background mode + :: - sh -c 'cd /opt/trex-core-2.25/scripts/ && sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /dev/null 2>&1 &' > /dev/null + $ sh -c 'cd /opt/trex-core-2.25/scripts/ && sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /dev/null 2>&1 &' > /dev/null - - There are traffic streams dynamically prepared for each test. The traffic - is sent and the statistics obtained using trex_stl_lib.api.STLClient. +- There are traffic streams dynamically prepared for each test. The traffic + is sent and the statistics obtained using trex_stl_lib.api.STLClient. **Measuring packet loss** - - Create an instance of STLClient - - Connect to the client - - Add all streams - - Clear statistics - - Send the traffic for defined time - - Get the statistics +- Create an instance of STLClient +- Connect to the client +- Add all streams +- Clear statistics +- Send the traffic for defined time +- Get the statistics If there is a warm-up phase required, the traffic is sent also before test and the statistics are ignored. diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst index 307bf3caf1..7db10ee0cd 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst @@ -25,9 +25,9 @@ TGint2-to-SUT2-to-SUT1-to-TGint1. .. toctree:: l2 - ipv4 - ipv6 - ipv4_tunnels - ipv6_tunnels + ip4 + ip6 + ip4_tunnels + ip6_tunnels vm_vhost ipsec diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ip4.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ip4.rst new file mode 100644 index 0000000000..daa2397dbf --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ip4.rst @@ -0,0 +1,60 @@ +IPv4 Routed-Forwarding +====================== + +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv4 Routed-Forwarding measured at 50% of discovered NDR throughput +rate. 
Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4 + $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-snat-1u-1p-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc + 10ge2p1x520-ethip4-ip4scale-snat-ndrpdrdisc.robot:| tc11-64B-1t1c-ethip4-ip4base-snat-4000u-15p-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4 + $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ip4_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ip4_tunnels.rst new file mode 100644 index 0000000000..a041b103fa --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ip4_tunnels.rst @@ -0,0 +1,51 @@ +IPv4 Overlay Tunnels +==================== + +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv4 Overlay Tunnels measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. 
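+
+The 50% figure refers to the offered load: if the discovered NDR of a test is,
+for example, 6.8 Mpps, latency is measured while offering 3.4 Mpps. A minimal
+sketch of that arithmetic is shown below (the 6.8 Mpps input is illustrative
+only, not a measured result of this section):
+
+.. code-block:: bash
+
+    # Illustrative only: compute the 50% NDR offered load used for latency
+    # measurements from a discovered NDR value (here 6.8 Mpps is an example).
+    $ awk 'BEGIN { ndr = 6.8; printf "offered load: %.1f Mpps\n", ndr * 0.5 }'
+    offered load: 3.4 Mpps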
+ +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc + +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc + diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ip6.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ip6.rst new file mode 100644 index 0000000000..10aa9b99c7 --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ip6.rst @@ -0,0 +1,57 @@ +IPv6 Routed-Forwarding +====================== + +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv6 Routed-Forwarding measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. + +.. raw:: html + + + +*Figure 2. 
VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ip6_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ip6_tunnels.rst new file mode 100644 index 0000000000..aa22f72aa6 --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ip6_tunnels.rst @@ -0,0 +1,47 @@ +IPv6 Overlay Tunnels +==================== + +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv6 Overlay Tunnels measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ipv6_tunnels + $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc + +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ipv6_tunnels + $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip6-ip6base-ndrdisc + diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst index a792cac907..1b552c13b2 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst @@ -10,14 +10,6 @@ for VPP running in multiple configurations of VPP worker thread(s), a.k.a. VPP data plane thread(s), and their physical CPU core(s) placement. -.. 
note:: - - Test results have been generated by FD.io test executor jobs - `csit-vpp-perf-1704-all - `_, - with Robot Framework result files csit-vpp-perf-1704-all-.zip - `archived here <../../_static/archive/>`_. - VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. @@ -35,14 +27,14 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-1t1c-.*ipsec.*-ndrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc @@ -62,13 +54,13 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-2t2c-.*ipsec.*-ndrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| 
tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst deleted file mode 100644 index 669c8a95e3..0000000000 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst +++ /dev/null @@ -1,60 +0,0 @@ -IPv4 Routed-Forwarding -====================== - -This section includes summary graphs of VPP Phy-to-Phy packet latency -with IPv4 Routed-Forwarding measured at 50% of discovered NDR throughput -rate. Latency is reported for VPP running in multiple configurations of -VPP worker thread(s), a.k.a. VPP data plane thread(s), and their -physical CPU core(s) placement. - -VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-snat-1u-1p-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc - 10ge2p1x520-ethip4-ip4scale-snat-ndrpdrdisc.robot:| tc11-64B-1t1c-ethip4-ip4base-snat-4000u-15p-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - -VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst deleted file mode 100644 index 6860fedc9a..0000000000 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst +++ /dev/null @@ -1,51 +0,0 @@ -IPv4 Overlay Tunnels -==================== - -This section includes summary graphs of VPP Phy-to-Phy packet latency -with IPv4 Overlay Tunnels measured at 50% of discovered NDR throughput -rate. Latency is reported for VPP running in multiple configurations of -VPP worker thread(s), a.k.a. VPP data plane thread(s), and their -physical CPU core(s) placement. - -VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc - -VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc - diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst deleted file mode 100644 index a8e8ce7b5a..0000000000 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst +++ /dev/null @@ -1,58 +0,0 @@ -IPv6 Routed-Forwarding -====================== - -This section includes summary graphs of VPP Phy-to-Phy packet latency -with IPv6 Routed-Forwarding measured at 50% of discovered NDR throughput -rate. Latency is reported for VPP running in multiple configurations of -VPP worker thread(s), a.k.a. VPP data plane thread(s), and their -physical CPU core(s) placement. - -VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc - -VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ipolicemarkbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc - diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst deleted file mode 100644 index aa22f72aa6..0000000000 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst +++ /dev/null @@ -1,47 +0,0 @@ -IPv6 Overlay Tunnels -==================== - -This section includes summary graphs of VPP Phy-to-Phy packet latency -with IPv6 Overlay Tunnels measured at 50% of discovered NDR throughput -rate. Latency is reported for VPP running in multiple configurations of -VPP worker thread(s), a.k.a. VPP data plane thread(s), and their -physical CPU core(s) placement. - -VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc - -VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip6-ip6base-ndrdisc - diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst index 3b56612a13..056d3b4665 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst @@ -21,17 +21,54 @@ git repository: .. 
code-block:: bash $ cd $CSIT/tests/vpp/perf/l2 - $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * - - 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-dot1ad-l2xcbase-ndrdisc - 10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-dot1q-l2xcbase-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc - 10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1xl710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1xl710-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc + $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-(iacl.*|oacl.*|eth.*)*ndrdisc" * + + 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1ad-l2xcbase-ndrdisc + 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| 
tc15-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2memif-1lxc-ndrdisc + 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc + 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1xl710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1xl710-eth-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. @@ -47,15 +84,16 @@ git repository: .. 
code-block:: bash $ cd $CSIT/tests/vpp/perf/l2 - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * - - 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-dot1ad-l2xcbase-ndrdisc - 10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-dot1q-l2xcbase-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc - 10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1xl710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc - 40ge2p1xl710-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc + $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-(iacl.*|oacl.*|eth.*)*ndrdisc" * + + 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1ad-l2xcbase-ndrdisc + 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2memif-1lxc-ndrdisc + 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc + 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1xl710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 40ge2p1xl710-eth-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst index 59b5e8a832..01ee2a9325 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst @@ -26,12 +26,21 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| 
tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-ip4base-eth-4vhost-2vm-ndrdisc @@ -57,12 +66,21 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| 
tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-ip4base-eth-4vhost-2vm-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst index c203aa1144..91191ef412 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst @@ -37,9 +37,9 @@ and their indices. .. toctree:: l2 - ipv4 - ipv6 - ipv4_tunnels - ipv6_tunnels + ip4 + ip6 + ip4_tunnels + ip6_tunnels vm_vhost ipsec diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4.rst new file mode 100644 index 0000000000..a2f37af649 --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4.rst @@ -0,0 +1,123 @@ +IPv4 Routed-Forwarding +====================== + +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv4 Routed-Forwarding, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +NDR Throughput +~~~~~~~~~~~~~~ + +VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4 + $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-snat-1u-1p-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + +VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4 + $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + +PDR Throughput +~~~~~~~~~~~~~~ + +VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4 + $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-pdrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-pdrdisc + 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-snat-1u-1p-pdrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale200k-pdrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale20k-pdrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale2m-pdrdisc + +VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ipv4 + $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-pdrdisc' * + + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-pdrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-pdrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-pdrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-pdrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-pdrdisc + diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4_tunnels.rst new file mode 100644 index 0000000000..39432ca10b --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip4_tunnels.rst @@ -0,0 +1,105 @@ +IPv4 Overlay Tunnels +==================== + +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv4 Overlay Tunnels, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +NDR Throughput +~~~~~~~~~~~~~~ + +VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc + +VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc + +PDR Throughput +~~~~~~~~~~~~~~ + +VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip4-ip4base-pdrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip6-ip4base-pdrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc + +VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip4_tunnels + $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * + + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip4-ip4base-pdrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip6-ip4base-pdrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2xcbase-pdrdisc + diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6.rst new file mode 100644 index 0000000000..09a44884c2 --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6.rst @@ -0,0 +1,116 @@ +IPv6 Routed-Forwarding +====================== + +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv6 Routed-Forwarding, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +NDR Throughput +~~~~~~~~~~~~~~ + +VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + +VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + +PDR Throughput +~~~~~~~~~~~~~~ + +VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-pdrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale200k-pdrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale20k-pdrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale2m-pdrdisc + +VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 +Routed-Forwarding.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6 + $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" * + + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-iacldstbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-pdrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale200k-pdrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale20k-pdrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale2m-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6_tunnels.rst new file mode 100644 index 0000000000..b54bdfeccd --- /dev/null +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ip6_tunnels.rst @@ -0,0 +1,98 @@ +IPv6 Overlay Tunnels +==================== + +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv6 Overlay Tunnels, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). 
Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +NDR Throughput +~~~~~~~~~~~~~~ + +VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6_tunnels + $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc + +VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. + +.. raw:: html + + + +*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6_tunnels + $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip6-ip6base-ndrdisc + +PDR Throughput +~~~~~~~~~~~~~~ + +VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6_tunnels + $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip4-ip6base-pdrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip6-ip6base-pdrdisc + +VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. + +.. raw:: html + + + +*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 Overlay +Tunnels.* + +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + +.. 
code-block:: bash + + $ cd $CSIT/tests/vpp/perf/ip6_tunnels + $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * + + 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6lispip4-ip6base-pdrdisc + 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6lispip6-ip6base-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst index 1a519ecbe6..9f9fc8b4f0 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst @@ -35,17 +35,16 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-1t1c-.*ipsec.*-ndrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc - VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented in the graph below. 
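The grep patterns in the hunks above and below list every IPsec NDR test case in the crypto suites. If only one crypto algorithm is of interest, the same pattern can be narrowed and the hits counted per suite file; a minimal sketch, assuming only the suite filenames shown in the listings (the ``aes-gcm`` filter string is the single added assumption):

.. code-block:: bash

    $ cd $CSIT/tests/vpp/perf/crypto
    $ # keep only the AES-GCM variants of the 1t1c NDR test cases and
    $ # count how many matching test cases each suite file contains
    $ grep -E "64B-1t1c-.*ipsec.*aes-gcm.*-ndrdisc" *.robot | cut -d: -f1 | sort | uniq -c
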
@@ -63,14 +62,14 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-2t2c-.*ipsec.*-ndrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-ndrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-ndrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc PDR Throughput @@ -93,17 +92,16 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-1t1c-.*ipsec.*-pdrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc - 
40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-pdrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc - VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented in the graph below. PDR measured for 0.5% packet loss ratio. 
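Both PDR hunks use a 0.5% packet loss ratio as the acceptance criterion. As a quick illustration of what that ratio means in absolute terms, the allowed loss per trial is the offered rate multiplied by the trial duration multiplied by 0.005; the numbers below (10 Mpps offered rate, 10 second trial) are purely illustrative assumptions, not values taken from this report:

.. code-block:: bash

    $ # allowed packet loss for one PDR trial = rate [pps] * duration [s] * 0.005
    $ awk 'BEGIN { rate=10000000; duration=10; printf "%d\n", rate*duration*0.005 }'
    500000
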
@@ -121,13 +119,12 @@ git repository: $ cd $CSIT/tests/vpp/perf/crypto $ grep -E "64B-2t2c-.*ipsec.*-pdrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc - {NOT PLOTTED} 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecbasetnl-ip4base-int-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecbasetnl-ip4base-tnl-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-int-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsecscale1000tnl-ip4base-tnl-cbc-sha1-pdrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc - diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst deleted file mode 100644 index 89a41bcc06..0000000000 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst +++ /dev/null @@ -1,126 +0,0 @@ -IPv4 Routed-Forwarding -====================== - -Following sections include summary graphs of VPP Phy-to-Phy performance -with IPv4 Routed-Forwarding, including NDR throughput (zero packet loss) -and PDR throughput (<0.5% packet loss). Performance is reported for VPP -running in multiple configurations of VPP worker thread(s), a.k.a. VPP -data plane thread(s), and their physical CPU core(s) placement. - -NDR Throughput -~~~~~~~~~~~~~~ - -VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 1. 
VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-snat-1u-1p-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc - 10ge2p1x520-ethip4-ip4scale-snat-ndrpdrdisc.robot:| tc11-64B-1t1c-ethip4-ip4base-snat-4000u-15p-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - -VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-ndrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc - -PDR Throughput -~~~~~~~~~~~~~~ - -VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-1t1c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-pdrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-pdrdisc - 10ge2p1x520-ethip4-ip4base-snat-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-snat-1u-1p-pdrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-pdrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-pdrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-pdrdisc - 10ge2p1x520-ethip4-ip4scale-snat-ndrpdrdisc.robot:| tc11-64B-1t1c-ethip4-ip4base-snat-4000u-15p-pdrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-pdrdisc - -VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4 - $ grep -P '64B-2t2c-ethip4-ip4(base|scale)[a-z0-9]*(?!-eth-[0-9]vhost).*-pdrdisc' * - - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-pdrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-pdrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-pdrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-pdrdisc - 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-pdrdisc - diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst deleted file mode 100644 index e1204af9ec..0000000000 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst +++ /dev/null @@ -1,105 +0,0 @@ -IPv4 Overlay Tunnels -==================== - -Following sections include summary graphs of VPP Phy-to-Phy performance -with IPv4 Overlay Tunnels, including NDR throughput (zero packet loss) -and PDR throughput (<0.5% packet loss). Performance is reported for VPP -running in multiple configurations of VPP worker thread(s), a.k.a. VPP -data plane thread(s), and their physical CPU core(s) placement. - -NDR Throughput -~~~~~~~~~~~~~~ - -VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc - -VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc - -PDR Throughput -~~~~~~~~~~~~~~ - -VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip4-ip4base-pdrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip6-ip4base-pdrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-pdrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc - -VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv4_tunnels - $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * - - 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip4-ip4base-pdrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip6-ip4base-pdrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-pdrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2xcbase-pdrdisc - diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst deleted file mode 100644 index 4331f492f5..0000000000 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst +++ /dev/null @@ -1,117 +0,0 @@ -IPv6 Routed-Forwarding -====================== - -Following sections include summary graphs of VPP Phy-to-Phy performance -with IPv6 Routed-Forwarding, including NDR throughput (zero packet loss) -and PDR throughput (<0.5% packet loss). Performance is reported for VPP -running in multiple configurations of VPP worker thread(s), a.k.a. VPP -data plane thread(s), and their physical CPU core(s) placement. - -NDR Throughput -~~~~~~~~~~~~~~ - -VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc - -VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. 
code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc - -PDR Throughput -~~~~~~~~~~~~~~ - -VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-pdrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale200k-pdrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale20k-pdrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale2m-pdrdisc - -VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 -Routed-Forwarding.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6 - $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" * - - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-iacldstbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-pdrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale200k-pdrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale20k-pdrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale2m-pdrdisc - diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst deleted file mode 100644 index 283d79613e..0000000000 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst +++ /dev/null @@ -1,98 +0,0 @@ -IPv6 Overlay Tunnels -==================== - -Following sections include summary graphs of VPP Phy-to-Phy performance -with IPv6 Overlay Tunnels, including NDR throughput (zero packet loss) -and PDR throughput (<0.5% packet loss). 
Performance is reported for VPP -running in multiple configurations of VPP worker thread(s), a.k.a. VPP -data plane thread(s), and their physical CPU core(s) placement. - -NDR Throughput -~~~~~~~~~~~~~~ - -VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc - -VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. - -.. raw:: html - - - -*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip6-ip6base-ndrdisc - -PDR Throughput -~~~~~~~~~~~~~~ - -VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip4-ip6base-pdrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip6-ip6base-pdrdisc - -VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented -in the graph below. PDR measured for 0.5% packet loss ratio. - -.. raw:: html - - - -*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 Overlay -Tunnels.* - -CSIT source code for the test cases used for above plots can be found in CSIT -git repository: - -.. code-block:: bash - - $ cd $CSIT/tests/vpp/perf/ipv6_tunnels - $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * - - 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6lispip4-ip6base-pdrdisc - 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6lispip6-ip6base-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst index d097d64c9c..e34786f0a5 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst @@ -26,12 +26,49 @@ git repository: .. 
code-block:: bash $ cd $CSIT/tests/vpp/perf/l2 - $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * + $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-(iacl.*|oacl.*|eth.*)*ndrdisc" * 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1ad-l2xcbase-ndrdisc 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| 
tc09-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc03-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc05-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc07-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc09-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc11-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows10k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc13-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc15-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100k-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc17-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100k-ndrdisc 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2memif-1lxc-ndrdisc 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc @@ -54,12 +91,13 @@ git repository: .. 
code-block:: bash $ cd $CSIT/tests/vpp/perf/l2 - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * + $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-(iacl.*|oacl.*|eth.*)*ndrdisc" * 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1ad-l2xcbase-ndrdisc 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2xcbase-ndrdisc 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2memif-1lxc-ndrdisc 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc @@ -90,7 +128,44 @@ git repository: 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1ad-l2xcbase-pdrdisc 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1q-l2xcbase-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc04-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc06-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc08-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc10-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc12-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc14-64B-1t1c-eth-l2bdbasemaclrn-iacl1-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc16-64B-1t1c-eth-l2bdbasemaclrn-iacl10-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermit-ndrpdrdisc.robot:| tc18-64B-1t1c-eth-l2bdbasemaclrn-iacl50-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc04-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc06-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc08-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc10-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc12-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc14-64B-1t1c-eth-l2bdbasemaclrn-iacl1-statefull-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc16-64B-1t1c-eth-l2bdbasemaclrn-iacl10-statefull-flows100k-pdrdisc + 
10ge2p1x520-eth-l2bdbasemaclrn-aclinpermitreflect-ndrpdrdisc.robot:| tc18-64B-1t1c-eth-l2bdbasemaclrn-iacl50-statefull-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc04-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc06-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc08-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc10-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc12-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc14-64B-1t1c-eth-l2bdbasemaclrn-oacl1-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc16-64B-1t1c-eth-l2bdbasemaclrn-oacl10-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermit-ndrpdrdisc.robot:| tc18-64B-1t1c-eth-l2bdbasemaclrn-oacl50-stateless-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc04-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc06-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc08-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc10-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc12-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows10k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc14-64B-1t1c-eth-l2bdbasemaclrn-oacl1-statefull-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc16-64B-1t1c-eth-l2bdbasemaclrn-oacl10-statefull-flows100k-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-acloutpermitreflect-ndrpdrdisc.robot:| tc18-64B-1t1c-eth-l2bdbasemaclrn-oacl50-statefull-flows100k-pdrdisc 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2memif-1lxc-pdrdisc 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-pdrdisc 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc @@ -111,13 +186,13 @@ git repository: .. 
code-block:: bash $ cd $CSIT/tests/vpp/perf/l2 - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" * + $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-(iacl.*|oacl.*|eth.*)*pdrdisc" * 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc 10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1ad-l2xcbase-pdrdisc 10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1q-l2xcbase-pdrdisc 10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2memif-1lxc-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2memif-1lxc-pdrdisc 10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-pdrdisc 10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc 40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc - diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst index fbb3bba4de..40769a6896 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst @@ -31,18 +31,27 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrdisc + 
10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-ip4base-eth-4vhost-2vm-ndrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc 40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc - 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-ip4base-eth-4vhost-2vm-ndrdisc VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented in the graph below. @@ -64,18 +73,27 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc - 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-ndrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| 
tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-ndrdisc + 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc + 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-ip4base-eth-4vhost-2vm-ndrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc 40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc - 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-ip4base-eth-4vhost-2vm-ndrdisc PDR Throughput ~~~~~~~~~~~~~~ @@ -100,18 +118,27 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhostvr256-1vm-pdrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| 
tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhostvr256-1vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc + 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-ip4base-eth-4vhost-2vm-pdrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc 40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc - 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-ip4base-eth-4vhost-2vm-pdrdisc VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented in the graph below. PDR measured for 0.5% packet loss ratio. 
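The renamed vhost suites above encode their configuration in the test name: ``vhostvr256`` and ``vhostvr1024`` denote the virtio ring size used on the vhost-user interfaces, and ``cfsrr1`` marks runs with the CFS round-robin priority-1 tuning described in the test environment section. As a rough, hypothetical illustration of how a 1024-entry virtio RX ring can be requested from QEMU (only the ``rx_queue_size`` property is the point of interest; the socket path, device IDs and elided arguments are placeholders, not the values used by CSIT):

.. code-block:: bash

    # Fragment of a QEMU command line attaching a vhost-user NIC with a
    # 1024-entry virtio RX ring (rx_queue_size requires QEMU >= 2.7).
    $ qemu-system-x86_64 ... \
        -chardev socket,id=char0,path=/tmp/vhost-user-1.sock \
        -netdev type=vhost-user,id=net0,chardev=char0 \
        -device virtio-net-pci,netdev=net0,rx_queue_size=1024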
@@ -133,16 +160,24 @@ git repository: 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc 10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc - 10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc - 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc - 10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-ethip4-ip4base-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhostvr256-1vm-pdrdisc 10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhostvr256-1vm-pdrdisc + 10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr1024-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhostvr1024-1vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-cfsrr1-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-2vhostvr256-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhostvr256-1vm-pdrdisc + 10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc 10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc + 40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-ip4base-eth-4vhost-2vm-pdrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc 40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc 40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc - 
40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-ip4base-eth-4vhost-2vm-pdrdisc - diff --git a/docs/report/vpp_performance_tests/test_environment.rst b/docs/report/vpp_performance_tests/test_environment.rst index 2ce5f8ad4f..68f4365a4d 100644 --- a/docs/report/vpp_performance_tests/test_environment.rst +++ b/docs/report/vpp_performance_tests/test_environment.rst @@ -4,6 +4,132 @@ Test Environment To execute performance tests, there are three identical testbeds, each testbed consists of two SUTs and one TG. +Naming Convention +----------------- + +Following naming convention is used within this page to specify physical +connectivity and wiring across defined CSIT testbeds: + +- testbedname: testbedN. +- hostname: + + - traffic-generator: tN-tgW. + - system-under-testX: tN-sutX. + +- portnames: + + - tN-tgW-cY/pZ. + - tN-sutX-cY/pZ. + +- where: + + - N - testbed number. + - tgW - server acts as traffic-generator with W index. + - sutX - server acts as system-under-test with X index. + - Y - PCIe slot number denoting a NIC card number within the host. + + - Y=1,2,3 - slots in Riser 1, Right PCIe Riser Board, NUMA node 0. + - Y=4,5,6 - slots in Riser 2, Left PCIe Riser Board, NUMA node 1. + - Y=m - the MLOM slot. + + - Z - port number on the NIC card. + +Server HW Configuration +----------------------- + +CSIT testbed contains following three HW configuration types of UCS x86 servers, +across total of ten servers provided: + +#. Type-1: Purpose - VPP functional and performance conformance testing. + + - Quantity: 6 computers as SUT hosts (Systems Under Test). + - Physical connectivity: + + - CIMC and host management ports. + - NIC ports connected in 3-node topologies. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. + - RAM Memory: 16* 32GB DDR4-2133MHz. + - Disks: 2* 2TB 12G SAS 7.2K RPM SFF HDD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 PCIe3.0 lanes) + + - PCIe Slot1: Cisco VIC 1385 2p40GE. + + - PCIe Slot2: Intel NIC x520 2p10GE. + - PCIe Slot3: empty. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 PCIe3.0 lanes) + + - PCIe Slot4: Intel NIC xl710 2p40GE. + - PCIe Slot5: Intel NIC x710 2p10GE. + - PCIe Slot6: Intel QAT 8950 50G (Walnut Hill) + + - MLOM slot: Cisco VIC 1227 2p10GE (x8 PCIe2.0 lanes). + +#. Type-2: Purpose - VPP functional and performance conformance testing. + + - Quantity: 3 computers as TG hosts (Traffic Generators). + - Physical connectivity: + + - CIMC and host management ports. + - NIC ports connected in 3-node topologies. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. + - RAM Memory: 16* 32GB DDR4-2133MHz. + - Disks: 2* 2TB 12G SAS 7.2K RPM SFF HDD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 lanes) + + - PCIe Slot1: Intel NIC xl710 2p40GE. + - PCIe Slot2: Intel NIC x710 2p10GE. + - PCIe Slot3: Intel NIC x710 2p10GE. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 lanes) + + - PCIe Slot4: Intel NIC xl710 2p40GE. + - PCIe Slot5: Intel NIC x710 2p10GE. + - PCIe Slot6: Intel NIC x710 2p10GE. + + - MLOM slot: empty. + +#. Type-3: Purpose - VIRL functional conformance. + + - Quantity: 3 computers as VIRL hosts. + - Physical connectivity: + + - CIMC and host management ports. + - no NIC ports, standalone setup. + + - Main HW configuration: + + - Chassis: UCSC-C240-M4SX with 6 PCIe3.0 slots. + - Processors: 2* E5-2699 2.3 GHz. 
+ - RAM Memory: 16* 32GB DDR4-2133MHz. + - Disks: 2* 480 GB 2.5inch 6G SATA SSD. + + - NICs configuration: + + - Right PCIe Riser Board (Riser 1) (x8, x8, x8 lanes) + + - no cards. + + - Left PCIe Riser Board (Riser 2) (x8, x16, x8 lanes) + + - no cards. + + - MLOM slot: empty. + SUT Configuration - Host HW --------------------------- Host hardware details (CPU, memory, NIC layout) and physical topology are @@ -12,9 +138,10 @@ described in detail in **Host configuration** -- All hosts are Cisco UCS C240-M4 (2x Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz, - 18c, 512GB RAM) - :: +All hosts are Cisco UCS C240-M4 (2x Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz, +18c, 512GB RAM) + +:: $ lscpu Architecture: x86_64 @@ -42,8 +169,9 @@ described in detail in NUMA node1 CPU(s): 18-35 Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm epb tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm xsaveopt cqm_llc cqm_occup_llc dtherm arat pln pts -- BIOS settings - :: +**BIOS settings** + +:: C240 /bios # show advanced detail Set-up parameters: @@ -114,18 +242,19 @@ described in detail in CDN Support for VIC: Disabled Out-of-Band Management: Disabled -- In addition to CIMC and Management, each TG has 4x Intel X710 10GB NIC - (=8 ports) and 2x Intel XL710 40GB NIC (=4 ports), whereas each SUT has: +**NIC models and placement** - - 1x Intel X520 NIC (10GB, 2 ports), - - 1x Cisco VIC 1385 (40GB, 2 ports), - - 1x Intel XL710 NIC (40GB, 2 ports), - - 1x Intel X710 NIC (10GB, 2 ports), - - 1x Cisco VIC 1227 (10GB, 2 ports). - - This allows for a total of five ring topologies, each using ports on - specific NIC model, enabling per NIC model benchmarking. +In addition to CIMC and Management, each TG has 4x Intel X710 10GB NIC +(=8 ports) and 2x Intel XL710 40GB NIC (=4 ports), whereas each SUT has: -**NIC models and placement** +- 1x Intel X520 NIC (10GB, 2 ports), +- 1x Cisco VIC 1385 (40GB, 2 ports), +- 1x Intel XL710 NIC (40GB, 2 ports), +- 1x Intel X710 NIC (10GB, 2 ports), +- 1x Cisco VIC 1227 (10GB, 2 ports). + +This allows for a total of five ring topologies, each using ports on specific +NIC model, enabling per NIC model benchmarking. - 0a:00.0 Ethernet controller: Intel Corporation 82599ES 10-Gigabit SFI/SFP+ Network Connection (rev 01) Subsystem: Intel Corporation Ethernet Server @@ -157,10 +286,13 @@ described in detail in SUT Configuration - Host OS Linux --------------------------------- -Software details (OS, configuration) are described in -`CSIT/CSIT_LF_testbed `_. +Software details (OS, configuration) are described in `LF FDio CSIT testbed +wiki page `_. + +System provisioning is done by combination of PXE boot unattented install and +`Ansible `_ described in `CSIT Testbed Setup`_. 
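For completeness, a minimal sketch of how the provisioning step mentioned above can be driven from a workstation is shown below; the playbook and inventory file names are assumptions made for illustration only, the authoritative procedure is the `CSIT Testbed Setup`_ document:

::

    # Clone the rls1707 branch and run the testbed-setup Ansible playbooks
    # against a locally maintained inventory. The file names "inventory" and
    # "site.yaml" are hypothetical examples.
    $ git clone https://git.fd.io/csit -b rls1707
    $ cd csit/resources/tools/testbed-setup
    $ ansible-playbook -i inventory site.yaml --ask-become-pass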
-Below a subset of the configuration: +Below a subset of the running configuration: :: @@ -233,11 +365,39 @@ Below a subset of the configuration: Node 1 HugePages_Free: 2048 Node 1 HugePages_Surp: 0 +**Kernel boot parameters used in CSIT performance testbeds** + +- **isolcpus=-** used for all cpu cores apart from + first core of each socket used for running VPP worker threads and Qemu/LXC + processes https://www.kernel.org/doc/Documentation/kernel-parameters.txt +- **intel_pstate=disable** - [X86] Do not enable intel_pstate as the default + scaling driver for the supported processors. Intel P-State driver decide what + P-state (CPU core power state) to use based on requesting policy from the + cpufreq core. [X86 - Either 32-bit or 64-bit x86] + https://www.kernel.org/doc/Documentation/cpu-freq/intel-pstate.txt +- **nohz_full=-** - [KNL,BOOT] In kernels built with + CONFIG_NO_HZ_FULL=y, set the specified list of CPUs whose tick will be stopped + whenever possible. The boot CPU will be forced outside the range to maintain + the timekeeping. The CPUs in this range must also be included in the + rcu_nocbs= set. Specifies the adaptive-ticks CPU cores, causing kernel to + avoid sending scheduling-clock interrupts to listed cores as long as they have + a single runnable task. [KNL - Is a kernel start-up parameter, SMP - The + kernel is an SMP kernel]. + https://www.kernel.org/doc/Documentation/timers/NO_HZ.txt +- **rcu_nocbs** - [KNL] In kernels built with CONFIG_RCU_NOCB_CPU=y, set the + specified list of CPUs to be no-callback CPUs, that never queue RCU callbacks + (read-copy update). + https://www.kernel.org/doc/Documentation/kernel-parameters.txt + +**Applied command line boot parameters:** + :: $ cat /proc/cmdline BOOT_IMAGE=/vmlinuz-4.4.0-72-generic root=UUID=35ea11e4-e44f-4f67-8cbe-12f09c49ed90 ro isolcpus=1-17,19-35 nohz_full=1-17,19-35 rcu_nocbs=1-17,19-35 intel_pstate=disable console=tty0 console=ttyS0,115200n8 +**Mount listing** + :: $ cat /proc/mounts @@ -273,6 +433,8 @@ Below a subset of the configuration: none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0 lxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0 +**Package listing** + :: $ dpkg -l @@ -808,6 +970,8 @@ Below a subset of the configuration: ii zlib1g:amd64 1:1.2.8.dfsg-2ubuntu4 amd64 compression library - runtime ii zlib1g-dev:amd64 1:1.2.8.dfsg-2ubuntu4 amd64 compression library - development +**Kernel module listing** + :: $ lsmod | sort @@ -897,6 +1061,8 @@ Below a subset of the configuration: xt_CHECKSUM 16384 1 xt_tcpudp 16384 5 +**Sysctl listing** + :: $ sysctl -a @@ -1841,6 +2007,8 @@ Below a subset of the configuration: vm.vfs_cache_pressure = 100 vm.zone_reclaim_mode = 0 +**Services listing** + :: $ service --status-all @@ -1888,6 +2056,72 @@ Below a subset of the configuration: [ + ] uuidd [ - ] x11-common +**Host CFS optimizations (QEMU+VPP)** + +Applying CFS scheduler tuning on all Qemu vcpu worker threads (those are +handling testpmd - pmd threads) and VPP PMD worker threads. List of VPP PMD +threads can be obtained e.g. 
from: + +:: + + $ for psid in $(pgrep vpp) + $ do + $ for tid in $(ps -Lo tid --pid $psid | grep -v TID) + $ do + $ echo $tid + $ done + $ done + +Or: + +:: + + $ cat /proc/`pidof vpp`/task/*/stat | awk '{print $1" "$2" "$39}' + +Applying Round-robin scheduling with highest priority + +:: + + $ for psid in $(pgrep vpp) + $ do + $ for tid in $(ps -Lo tid --pid $psid | grep -v TID) + $ do + $ chrt -r -p 1 $tid + $ done + $ done + +More information about Linux CFS can be found in: `Sched manual pages +`_. + + +**Host IRQ affinity** + +Changing the default pinning of every IRQ to core 0. (Same does apply on both +guest VM and host OS) + +:: + + $ for l in `ls /proc/irq`; do echo 1 | sudo tee /proc/irq/$l/smp_affinity; done + +**Host RCU affinity** + +Changing the default pinning of RCU to core 0. (Same does apply on both guest VM +and host OS) + +:: + + $ for i in `pgrep rcu[^c]` ; do sudo taskset -pc 0 $i ; done + +**Host Writeback affinity** + +Changing the default pinning of writebacks to core 0. (Same does apply on both +guest VM and host OS) + +:: + + $ echo 1 | sudo tee /sys/bus/workqueue/devices/writeback/cpumask + + DUT Configuration - VPP ----------------------- @@ -1903,7 +2137,7 @@ DUT Configuration - VPP :: - $ dpkg -i --force-all + $ dpkg -i --force-all vpp* **VPP Startup Configuration** @@ -2040,6 +2274,12 @@ DPDK v17.05 - dest_mac : [0x3c,0xfd,0xfe,0x9c,0xee,0xf4] src_mac : [0x3c,0xfd,0xfe,0x9c,0xee,0xf5] +**TG Startup Command** + +:: + + $ sh -c 'cd /scripts/ && sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /dev/null 2>&1 &'> /dev/null + **TG common API - pointer to driver** `TRex driver`_ diff --git a/resources/tools/report_gen/conf.py b/resources/tools/report_gen/conf.py index 4c90e55ddf..93f39024c6 100644 --- a/resources/tools/report_gen/conf.py +++ b/resources/tools/report_gen/conf.py @@ -69,14 +69,17 @@ rst_epilog = """ .. _tag documentation rst file: https://git.fd.io/csit/tree/docs/tag_documentation.rst?h=rls1707 .. _TRex intallation: https://git.fd.io/csit/tree/resources/tools/trex/trex_installer.sh?h=rls1707 .. _TRex driver: https://git.fd.io/csit/tree/resources/tools/trex/trex_stateless_profile.py?h=rls1707 -.. _CSIT Honeycomb Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.func.html -.. _CSIT DPDK Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.perf.html -.. _CSIT VPP Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.func.html -.. _CSIT VPP Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.perf.html +.. _CSIT Honeycomb Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.func.html +.. _CSIT DPDK Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.dpdk.perf.html +.. _CSIT VPP Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.func.html +.. _CSIT VPP Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.perf.html +.. _CSIT NSH_SFC Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.nsh_sfc.func.html .. _VPP test framework documentation: https://docs.fd.io/vpp/17.07/vpp_make_test/html/ .. _FD.io test executor vpp performance jobs: https://jenkins.fd.io/view/csit/job/csit-vpp-perf-1707-all .. _FD.io test executor dpdk performance jobs: https://jenkins.fd.io/view/csit/job/csit-dpdk-perf-1707-all .. _FD.io VPP compile job: https://jenkins.fd.io/view/vpp/job/vpp-merge-1707-ubuntu1604/ +.. 
diff --git a/resources/tools/report_gen/conf.py b/resources/tools/report_gen/conf.py
index 4c90e55ddf..93f39024c6 100644
--- a/resources/tools/report_gen/conf.py
+++ b/resources/tools/report_gen/conf.py
@@ -69,14 +69,17 @@ rst_epilog = """
 .. _tag documentation rst file: https://git.fd.io/csit/tree/docs/tag_documentation.rst?h=rls1707
 .. _TRex intallation: https://git.fd.io/csit/tree/resources/tools/trex/trex_installer.sh?h=rls1707
 .. _TRex driver: https://git.fd.io/csit/tree/resources/tools/trex/trex_stateless_profile.py?h=rls1707
-.. _CSIT Honeycomb Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.func.html
-.. _CSIT DPDK Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.perf.html
-.. _CSIT VPP Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.func.html
-.. _CSIT VPP Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.perf.html
+.. _CSIT Honeycomb Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.func.html
+.. _CSIT DPDK Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.dpdk.perf.html
+.. _CSIT VPP Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.func.html
+.. _CSIT VPP Performance Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.vpp.perf.html
+.. _CSIT NSH_SFC Functional Tests Documentation: https://docs.fd.io/csit/rls1707/doc/tests.nsh_sfc.func.html
 .. _VPP test framework documentation: https://docs.fd.io/vpp/17.07/vpp_make_test/html/
 .. _FD.io test executor vpp performance jobs: https://jenkins.fd.io/view/csit/job/csit-vpp-perf-1707-all
 .. _FD.io test executor dpdk performance jobs: https://jenkins.fd.io/view/csit/job/csit-dpdk-perf-1707-all
 .. _FD.io VPP compile job: https://jenkins.fd.io/view/vpp/job/vpp-merge-1707-ubuntu1604/
+.. _FD.io VPP compile job: https://jenkins.fd.io/view/vpp/job/vpp-merge-1707-ubuntu1604/
+.. _CSIT Testbed Setup: https://git.fd.io/csit/tree/resources/tools/testbed-setup/README.md?h=rls1707
 """
 
 # The language for content autogenerated by Sphinx. Refer to documentation
diff --git a/resources/tools/report_gen/run_report.sh b/resources/tools/report_gen/run_report.sh
index e11c68e8ee..494ec5650a 100755
--- a/resources/tools/report_gen/run_report.sh
+++ b/resources/tools/report_gen/run_report.sh
@@ -41,18 +41,18 @@ sudo apt-get -y update
 sudo apt-get -y install libxml2 libxml2-dev libxslt-dev build-essential \
     zlib1g-dev unzip
 
-# Clean-up when finished:
+# Clean-up when finished
 trap 'rm -rf ${WORKING_DIR}; exit' EXIT
 trap 'rm -rf ${WORKING_DIR}; exit' ERR
 
-# Remove the old build:
+# Remove the old build
 rm -rf ${BUILD_DIR} || true
 rm -rf ${WORKING_DIR} || true
 
 # Create working directories
 mkdir ${BUILD_DIR}
 
-# Create virtual environment:
+# Create virtual environment
 virtualenv ${WORKING_DIR}/env
 . ${WORKING_DIR}/env/bin/activate
 
@@ -72,22 +72,23 @@ mkdir -p ${PLOT_DPDK_SOURCE_DIR}
 
 ### VPP PERFORMANCE SOURCE DATA
 
 JEN_FILE_PERF='output_perf_data.xml'
-JEN_JOB='csit-vpp-perf-1704-all'
-JEN_BUILD=(6 7 8 9 10 12 14 15 16 17)
+JEN_JOB='csit-vpp-perf-master-all'
+JEN_BUILD=(1567)
+JEN_FBUILD=1567
 
 for i in "${JEN_BUILD[@]}"; do
     curl --fail -fs ${CSIT_JEN_URL}/${JEN_JOB}/${i}/robot/report/output_perf_data.xml \
         -o ${PLOT_VPP_SOURCE_DIR}/${JEN_JOB}-${i}.xml
     if [[ ${DEBUG} -eq 1 ]] ;
     then
-        cp ./${JEN_JOB}-10.zip ${STATIC_DIR_ARCH}/${JEN_JOB}-10.zip
+        cp ./${JEN_JOB}-${JEN_FBUILD}.zip ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_FBUILD}.zip
     else
         curl --fail -fs ${CSIT_JEN_URL}/${JEN_JOB}/${i}/robot/report/\*zip\*/robot-plugin.zip \
             -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${i}.zip
     fi
 done
 
-unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-10.zip -d ${WORKING_DIR}/
+unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_FBUILD}.zip -d ${WORKING_DIR}/
 python run_robot_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
     --output ${DTR_PERF_SOURCE_DIR}/vpp_performance_results.rst \
     --formatting rst --start 4 --level 2
@@ -97,48 +98,52 @@ python run_robot_teardown_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
 python run_robot_teardown_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
     -o ${DTO_PERF_SOURCE_OPER_DIR}/vpp_performance_operational_data.rst \
     --data "SH_RUN" -f "rst" --start 4 --level 2
-sed -i -e "s@###JOB###@${JEN_JOB}\/75@g" \
-    ${DTR_PERF_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/75@g" \
-    ${DTR_PERF_SOURCE_DIR}/index.rst
-sed -i -e "s@###JOB###@${JEN_JOB}\/75@g" \
-    ${DTC_PERF_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/75@g" \
-    ${DTC_PERF_SOURCE_DIR}/index.rst
-sed -i -e "s@###JOB###@${JEN_JOB}\/75@g" \
-    ${DTO_PERF_SOURCE_OPER_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/75@g" \
-    ${DTO_PERF_SOURCE_OPER_DIR}/index.rst
+if [[ ${DEBUG} -eq 0 ]] ;
+then
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTR_PERF_SOURCE_DIR}/index.rst
+    sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTR_PERF_SOURCE_DIR}/index.rst
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTC_PERF_SOURCE_DIR}/index.rst
+    sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTC_PERF_SOURCE_DIR}/index.rst
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTO_PERF_SOURCE_OPER_DIR}/index.rst
+    sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_FBUILD}@g" \
+        ${DTO_PERF_SOURCE_OPER_DIR}/index.rst
+fi
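The guarded sed block above fills in the ###JOB### and ###LINK### placeholders carried by the per-chapter index.rst templates. A minimal standalone illustration of the same substitution pattern follows; the temporary file and its contents are invented for the example, while the job/build values mirror the ones used above:

::

    $ printf 'Results generated from ###JOB###.\nSee ###LINK### for the raw archive.\n' > /tmp/index.rst
    $ JOB='csit-vpp-perf-master-all/1567'
    $ LINK="https://jenkins.fd.io/view/csit/job/${JOB}"
    $ sed -i -e "s@###JOB###@${JOB}@g" -e "s@###LINK###@${LINK}@g" /tmp/index.rst
    $ cat /tmp/index.rst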
 
 ### DPDK PERFORMANCE SOURCE DATA
 
-JEN_JOB='csit-dpdk-perf-master-all'
-JEN_BUILD=(13 14 15 16)
+JEN_JOB='csit-dpdk-perf-1707-all'
+JEN_BUILD=(1 2 3 4)
 
 for i in "${JEN_BUILD[@]}"; do
     curl --fail -fs ${CSIT_JEN_URL}/${JEN_JOB}/${i}/robot/report/output_perf_data.xml \
         -o ${PLOT_DPDK_SOURCE_DIR}/${JEN_JOB}-${i}.xml
     if [[ ${DEBUG} -eq 1 ]] ;
     then
-        cp ./${JEN_JOB}-16.zip ${STATIC_DIR_ARCH}/${JEN_JOB}-16.zip
+        cp ./${JEN_JOB}-${JEN_BUILD[-1]}.zip ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD[-1]}.zip
     else
         curl --fail -fs ${CSIT_JEN_URL}/${JEN_JOB}/${i}/robot/report/\*zip\*/robot-plugin.zip \
             -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${i}.zip
+
+        sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD[-1]}@g" \
+            ${DTR_DPDK_SOURCE_DIR}/index.rst
+        sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD[-1]}@g" \
+            ${DTR_DPDK_SOURCE_DIR}/index.rst
     fi
 done
 
-unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-16.zip -d ${WORKING_DIR}/
+unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD[-1]}.zip -d ${WORKING_DIR}/
 python run_robot_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
     --output ${DTR_DPDK_SOURCE_DIR}/dpdk_performance_results.rst \
-    --formatting rst --start 3 --level 2
-sed -i -e "s@###JOB###@${JEN_JOB}\/16@g" \
-    ${DTR_DPDK_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/16@g" \
-    ${DTR_DPDK_SOURCE_DIR}/index.rst
+    --formatting rst --start 4 --level 2
 
 ### FUNCTIONAL SOURCE DATA
 
-JEN_JOB='csit-vpp-functional-master-ubuntu1604-virl'
+JEN_JOB='csit-vpp-functional-1707-ubuntu1604-virl'
 JEN_BUILD='lastSuccessfulBuild'
 
 if [[ ${DEBUG} -eq 1 ]] ;
@@ -147,6 +152,15 @@ then
 else
     curl -fs ${CSIT_JEN_URL}/${JEN_JOB}/${JEN_BUILD}/robot/report/\*zip\*/robot-plugin.zip \
         -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD}.zip
+
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTR_FUNC_SOURCE_DIR}/index.rst
+    sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTR_FUNC_SOURCE_DIR}/index.rst
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTC_FUNC_SOURCE_DIR}/index.rst
+    sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTC_FUNC_SOURCE_DIR}/index.rst
 fi
 
 unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD}.zip -d ${WORKING_DIR}/
@@ -156,18 +170,10 @@ python run_robot_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
 python run_robot_teardown_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
     --output ${DTC_FUNC_SOURCE_DIR}/vpp_functional_configuration.rst \
     --data "VAT_H" -f "rst" --start 5 --level 2
-sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTR_FUNC_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTR_FUNC_SOURCE_DIR}/index.rst
-sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTC_FUNC_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${CSIT_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTC_FUNC_SOURCE_DIR}/index.rst
 
 ### HONEYCOMB SOURCE DATA
 
-JEN_JOB='hc2vpp-csit-integration-master-ubuntu1604'
+JEN_JOB='hc2vpp-csit-integration-1707-ubuntu1604'
 JEN_BUILD='lastSuccessfulBuild'
 
 if [[ ${DEBUG} -eq 1 ]] ;
@@ -176,21 +182,22 @@ then
 else
     curl -fs ${HC_JEN_URL}/${JEN_JOB}/${JEN_BUILD}/robot/report/\*zip\*/robot-plugin.zip \
         -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD}.zip
+
+    sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTR_HONEYCOMB_SOURCE_DIR}/index.rst
+    sed -i -e "s@###LINK###@${HC_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
+        ${DTR_HONEYCOMB_SOURCE_DIR}/index.rst
 fi
 
 unzip -o ${STATIC_DIR_ARCH}/${JEN_JOB}-${JEN_BUILD}.zip -d ${WORKING_DIR}/
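The VPP, DPDK, functional and Honeycomb sections above all repeat the same download-and-extract pattern against a Jenkins job. Purely as an illustrative refactoring sketch (the helper below does not exist in run_report.sh; its name and interface are invented here), the pattern could be captured once and reused:

::

    # Hypothetical helper, not part of run_report.sh: fetch the robot-plugin
    # archive for one job/build and unpack it into the working directory.
    fetch_robot_archive () {
        local base_url=$1 job=$2 build=$3
        curl --fail -fs "${base_url}/${job}/${build}/robot/report/*zip*/robot-plugin.zip" \
            -o "${STATIC_DIR_ARCH}/${job}-${build}.zip"
        unzip -o "${STATIC_DIR_ARCH}/${job}-${build}.zip" -d "${WORKING_DIR}/"
    }

    # Example call mirroring the Honeycomb section above:
    # fetch_robot_archive ${HC_JEN_URL} hc2vpp-csit-integration-1707-ubuntu1604 lastSuccessfulBuild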
 python run_robot_data.py -i ${WORKING_DIR}/robot-plugin/output.xml \
     --output ${DTR_HONEYCOMB_SOURCE_DIR}/honeycomb_functional_results.rst \
-    --formatting rst --start 3 --level 2
-sed -i -e "s@###JOB###@${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTR_HONEYCOMB_SOURCE_DIR}/index.rst
-sed -i -e "s@###LINK###@${HC_JEN_URL}\/${JEN_JOB}\/${JEN_BUILD}@g" \
-    ${DTR_HONEYCOMB_SOURCE_DIR}/index.rst
+    --formatting rst --start 5 --level 2
 
 # Delete temporary json files
 find ${SOURCE_DIR} -name "*.json" -type f -delete
 
-# Generate the documentation:
+# Generate the documentation
 
 DATE=$(date -u '+%d-%b-%Y')
@@ -354,11 +361,13 @@ python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
 python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
     --output ${STATIC_DIR_DPDK}/64B-1t1c-ipv4-ndrdisc \
     --title "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc" \
-    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"NDRDISC") and contains(@tags,"1T1C") and contains(@tags,"IP4FWD")]'
+    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"NDRDISC") and contains(@tags,"1T1C") and contains(@tags,"IP4FWD")]' \
+    --lower 2000000 --upper 12000000
 python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
     --output ${STATIC_DIR_DPDK}/64B-2t2c-ipv4-ndrdisc \
     --title "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc" \
-    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"NDRDISC") and contains(@tags,"2T2C") and contains(@tags,"IP4FWD")]'
+    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"NDRDISC") and contains(@tags,"2T2C") and contains(@tags,"IP4FWD")]' \
+    --lower 2000000 --upper 12000000
 
 python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
     --output ${STATIC_DIR_DPDK}/64B-1t1c-l2-pdrdisc \
@@ -371,11 +380,13 @@ python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
 python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
     --output ${STATIC_DIR_DPDK}/64B-1t1c-ipv4-pdrdisc \
     --title "64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc" \
-    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"PDRDISC") and not(contains(@tags,"NDRDISC")) and contains(@tags,"1T1C") and contains(@tags,"IP4FWD")]'
+    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"PDRDISC") and not(contains(@tags,"NDRDISC")) and contains(@tags,"1T1C") and contains(@tags,"IP4FWD")]' \
+    --lower 20000000 --upper 30000000
 python run_plot.py --input ${PLOT_DPDK_SOURCE_DIR} \
     --output ${STATIC_DIR_DPDK}/64B-2t2c-ipv4-pdrdisc \
     --title "64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc" \
-    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"PDRDISC") and not(contains(@tags,"NDRDISC")) and contains(@tags,"2T2C") and contains(@tags,"IP4FWD")]'
+    --xpath '//*[@framesize="64B" and contains(@tags,"BASE") and contains(@tags,"PDRDISC") and not(contains(@tags,"NDRDISC")) and contains(@tags,"2T2C") and contains(@tags,"IP4FWD")]' \
+    --lower 20000000 --upper 30000000
 
 # Plot latency
--
cgit 1.2.3-korg