From 02f2a2176ec92efdf63399fb7dba1eb586465f38 Mon Sep 17 00:00:00 2001
From: Maciek Konstantynowicz
Date: Fri, 21 Apr 2017 20:42:05 +0100
Subject: CSIT rls1704 report - update to handcrafted sections:

 - index.rst
 - vpp_performance_tests/csit_release_notes
 - vpp_performance_tests/overview.rst
 - vpp_performance_tests/packet_throughput_graphs/...
 - testpmd_performance_tests/csit_release_notes.rst
 - vpp_functional_tests/csit_release_notes.rst
 - testpmd_performance_tests/packet_latency_graphs/...
 - testpmd_performance_tests/packet_throughput_graphs/...

Change-Id: I06bd27bc30d06d69873da74756ca586e12e55643
Signed-off-by: Maciek Konstantynowicz
---
 .../report/honeycomb_functional_tests/overview.rst | 392 ++++++-------
 docs/report/index.rst                              |   2 +-
 .../csit_release_notes.rst                         |   2 +-
 .../packet_latency_graphs/l2.rst                   |  61 +-
 .../packet_throughput_graphs/l2.rst                | 104 ++--
 .../vpp_functional_tests/csit_release_notes.rst    |  12 +-
 .../vpp_performance_tests/csit_release_notes.rst   | 229 ++++----
 docs/report/vpp_performance_tests/overview.rst     | 649 +++++++++++----------
 .../packet_latency_graphs/ipsec.rst                |  74 ++-
 .../packet_latency_graphs/ipv4.rst                 |  89 +--
 .../packet_latency_graphs/ipv4_tunnels.rst         |  56 +-
 .../packet_latency_graphs/ipv6.rst                 |  56 +-
 .../packet_latency_graphs/ipv6_tunnels.rst         |  56 +-
 .../packet_latency_graphs/l2.rst                   |  56 +-
 .../packet_latency_graphs/vm_vhost.rst             |  57 +-
 .../packet_throughput_graphs/ipsec.rst             | 117 ++--
 .../packet_throughput_graphs/ipv4.rst              | 138 ++---
 .../packet_throughput_graphs/ipv4_tunnels.rst      | 110 ++--
 .../packet_throughput_graphs/ipv6.rst              | 132 +++--
 .../packet_throughput_graphs/ipv6_tunnels.rst      |  78 +--
 .../packet_throughput_graphs/l2.rst                | 142 ++---
 .../packet_throughput_graphs/vm_vhost.rst          | 111 ++--
 22 files changed, 1429 insertions(+), 1294 deletions(-)

diff --git a/docs/report/honeycomb_functional_tests/overview.rst b/docs/report/honeycomb_functional_tests/overview.rst
index edac897cd9..9cf741e013 100644
--- a/docs/report/honeycomb_functional_tests/overview.rst
+++ b/docs/report/honeycomb_functional_tests/overview.rst
@@ -1,196 +1,196 @@
-Overview
-========
-
-Tested Virtual Topologies
--------------------------
-
-CSIT Honeycomb functional tests are executed on virtualized topologies created
-using Virtual Internet Routing Lab (VIRL) simulation platform contributed by
-Cisco. VIRL runs on physical baremetal servers hosted by LF FD.io project. All
-tests are executed in two node logical test topology - Traffic Generator (TG)
-node and Systems Under Test (SUT1) node connected in a loop. Logical test
-topology is shown in the figure below.
-
-::
-
-    +------------------------+
-    | |
-    | +------------------+ |
-    +---------------> <--------------+
-    | | | | | |
-    | |------------> DUT1 <-----------+ |
-    | | | +------------------+ | | |
-    | | | | | |
-    | | | SUT1 | | |
-    | | +------------------------+ | |
-    | | | |
-    | | | |
-    | | +-----------+ | |
-    | +---------------> <---------------+ |
-    | | TG | |
-    +------------------> <------------------+
-    +-----------+
-
-SUT1 is a VM (Ubuntu or Centos, depending on the test suite), TG is a Traffic
-Generator (TG, another Ubuntu VM). SUTs run Honeycomb and VPP SW applications
-in Linux user-mode as a Device Under Test (DUT) within the VM. TG runs Scapy
-SW application as a packet Traffic Generator. Logical connectivity between
-SUTs and to TG is provided using virtual NICs using VMs' virtio driver.
-
-Virtual testbeds are created on-demand whenever a verification job is started
-(e.g.
triggered by the gerrit patch submission) and destroyed upon completion -of all functional tests. Each node is a Virtual Machine and each connection -that is drawn on the diagram is available for use in any test case. During the -test execution, all nodes are reachable thru the Management network connected -to every node via dedicated virtual NICs and virtual links (not shown above -for clarity). - -Functional Tests Coverage -------------------------- - -The following Honeycomb functional test areas are included in the CSIT |release| -with results listed in this report: - -- **Basic interface management** - CRUD for interface state, - - ipv4/ipv6 address, ipv4 neighbor, MTU value. - - Test case count: 7 -- **L2BD** - CRUD for L2 Bridge-Domain, interface assignment. - - Create up to two bridge domains with all implemented functions turned on. - - (flooding, unknown-unicast flooding, forwarding, learning, arp-termination) - - Assign up to two physical interfaces to a single bridge domain. - - Remove interface assignments, remove bridge domains. - - Test case count: 5 -- **L2FIB** - CRD for L2-FIB entries. - - Create 4 FIB entries - - (one of each for filter/forward, static/dynamic combinations). - - Remove FIB entries. - - Test case count: 7 -- **VxLAN** - CRD for VxLAN tunnels. - - Create VxLAN interface. - - Disable VxLAN interface. - - Re-create a disabled VxLAN interface. - - Test case count: 6 -- **VxLAN-GPE** - CRD for VxLAN GPE tunnels. - - Create VxLAN GPE interface. - - Disable VxLAN interface. - - Re-create a disabled VxLAN interface. - - Test case count: 7 -- **Vhost-user** - CRUD for Vhost-user interfaces. - - Create, modify and delete Vhost-user interface, as client and server. - - Test case count: 8 -- **TAP** - CRUD for Tap interface management. - - Create, modify and delete TAP interface. - - Test case count: 3 -- **VLAN** - CRUD for VLAN sub-interface management. - - Create VLAN sub-interface over a physical interface. - - Toggle interface state separately for super-interface and sub-interface. - - Configure IP address and bridge domain assignment on sub-interface. - - Configure VLAN tag rewrite on sub-interface. - - Test case count: 17 -- **ACL** - CRD for low-level classifiers: table and session management, - - interface assignment. - - Configure up to 2 classify tables. - - Configure up to 2 classify sessions on one table. - - Assign classify session to a physical interface. - - Remove tables, sessions, interface assignments. - - Test case count: 9 -- **PBB** - CRD for provider backbone bridge sub-interface. - - Configure, modify and remove a PBB sub-interface over a physical interface. - - Test case count: 9 -- **NSH_SFC** - CRD for NSH maps and entries, using NSH_SFC plugin. - - Configure up to 2 NSH entries. - - Configure up to 2 NSH maps. - - Modify and delete NSH maps and entries. - - Test case count: 8 -- **LISP** - CRD for Lisp: mapping, locator set, adjacency, map resolver. - - Toggle Lisp feature status. - - Configure and delete Lisp mapping as local and remote. - - Configure and delete Lisp adjacency mapping - - Configure and delete Lisp map resolver, proxy ITR. - - Test case count: 11 -- **NAT** - CRD for NAT entries, interface assignment. - - Configure and delete up to two NAT entries. - - Assign NAT entries to a physical interface. - - Test case count: 6 -- **Port mirroring** - CRD for SPAN port mirroring, interface assignment. - - Configure SPAN port mirroring on a physical interface, mirroring - - up to 2 interfaces. 
- - Remove SPAN configuration from interfaces. - - Test case count: 3 -- **ACL-PLUGIN** - CRD for high-level classifier - - MAC + IP address classification. - - IPv4, IPv6 address classification. - - TCP, UDP, ICMP, ICMPv6 protocol/next-header classification. - - port number classification. - - ICMP, ICMPv6 code and type classification. - - Test case count: 15 -- **ProxyARP** - CRD for proxyARP feature. - - Configure proxyARP. - - Assign to interface. - - Test case count: 3 -- **ProxyND6** - CRD for Neighbor Discovery Proxy. - - Configure ProxyND6 feature on interface. - - Test case count: 4 -- **DHCP Relay** - CRD for DHCP relay feature. - - Configure DHCP Relays. - - IPv4 and IPv6 variants. - - Test case count: 4 -- **SLAAC** - CRD for Stateless Address AutoConfiguration. - - Configure SLAAC feature on interfaces. - - Test case count: 7 -- **Routing** - CRD for routing. - - Configure single-hop route. - - Configure multi-hop routes. - - Configure blackhole route. - - IPv4 and IPv6 variants. - - Test case count: 6 -- **Honeycomb Infractructure** - configuration persistence, - - Netconf notifications for interface events, - - Netconf negative tests aimed at specific issues - -Total 158 Honeycomb tests in the CSIT |release|. - -Operational data in Honeycomb should mirror configuration data at all times. -Because of this, test cases follow this general pattern: - -#. read operational data of the feature using restconf. -#. read status of the feature using VPP API dump. -#. modify configuration of the feature using restconf. -#. verify changes to operational data using restconf. -#. verify changes using VPP API dump, OR -#. send a packet to VPP node and observe behaviour to verify configuration - -Test cases involving network interfaces utilize the first two interfaces on -the DUT node. - -Functional Tests Naming ------------------------ - -CSIT |release| introduced a common structured naming convention for all -performance and functional tests. This change was driven by substantially -growing number and type of CSIT test cases. Firstly, the original practice did -not always follow any strict naming convention. Secondly test names did not -always clearly capture tested packet encapsulations, and the actual type or -content of the tests. Thirdly HW configurations in terms of NICs, ports and -their locality were not captured either. These were but few reasons that drove -the decision to change and define a new more complete and stricter test naming -convention, and to apply this to all existing and new test cases. - -The new naming should be intuitive for majority of the tests. The complete -description of CSIT test naming convention is provided on `CSIT test naming -page `_. - -Here few illustrative examples of the new naming usage for functional test -suites: - -#. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P** - - - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline - routed forwarding, functional tests. - -#. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC, - P2V2P, NIC2VMchain2NIC, P2V2V2P** - - - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of - Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost - interfaces and one VM, functional tests. 
+Overview +======== + +Tested Virtual Topologies +------------------------- + +CSIT Honeycomb functional tests are executed on virtualized topologies created +using Virtual Internet Routing Lab (VIRL) simulation platform contributed by +Cisco. VIRL runs on physical baremetal servers hosted by LF FD.io project. All +tests are executed in two node logical test topology - Traffic Generator (TG) +node and Systems Under Test (SUT1) node connected in a loop. Logical test +topology is shown in the figure below. + +:: + + +------------------------+ + | | + | +------------------+ | + +---------------> <--------------+ + | | | | | | + | |------------> DUT1 <-----------+ | + | | | +------------------+ | | | + | | | | | | + | | | SUT1 | | | + | | +------------------------+ | | + | | | | + | | | | + | | +-----------+ | | + | +---------------> <---------------+ | + | | TG | | + +------------------> <------------------+ + +-----------+ + +SUT1 is a VM (Ubuntu or Centos, depending on the test suite), TG is a Traffic +Generator (TG, another Ubuntu VM). SUTs run Honeycomb and VPP SW applications +in Linux user-mode as a Device Under Test (DUT) within the VM. TG runs Scapy +SW application as a packet Traffic Generator. Logical connectivity between +SUTs and to TG is provided using virtual NICs using VMs' virtio driver. + +Virtual testbeds are created on-demand whenever a verification job is started +(e.g. triggered by the gerrit patch submission) and destroyed upon completion +of all functional tests. Each node is a Virtual Machine and each connection +that is drawn on the diagram is available for use in any test case. During the +test execution, all nodes are reachable thru the Management network connected +to every node via dedicated virtual NICs and virtual links (not shown above +for clarity). + +Functional Tests Coverage +------------------------- + +The following Honeycomb functional test areas are included in the CSIT |release| +with results listed in this report: + +- **Basic interface management** - CRUD for interface state, + - ipv4/ipv6 address, ipv4 neighbor, MTU value. + - Test case count: 7 +- **L2BD** - CRUD for L2 Bridge-Domain, interface assignment. + - Create up to two bridge domains with all implemented functions turned on. + - (flooding, unknown-unicast flooding, forwarding, learning, arp-termination) + - Assign up to two physical interfaces to a single bridge domain. + - Remove interface assignments, remove bridge domains. + - Test case count: 5 +- **L2FIB** - CRD for L2-FIB entries. + - Create 4 FIB entries + - (one of each for filter/forward, static/dynamic combinations). + - Remove FIB entries. + - Test case count: 7 +- **VxLAN** - CRD for VxLAN tunnels. + - Create VxLAN interface. + - Disable VxLAN interface. + - Re-create a disabled VxLAN interface. + - Test case count: 6 +- **VxLAN-GPE** - CRD for VxLAN GPE tunnels. + - Create VxLAN GPE interface. + - Disable VxLAN interface. + - Re-create a disabled VxLAN interface. + - Test case count: 7 +- **Vhost-user** - CRUD for Vhost-user interfaces. + - Create, modify and delete Vhost-user interface, as client and server. + - Test case count: 8 +- **TAP** - CRUD for Tap interface management. + - Create, modify and delete TAP interface. + - Test case count: 3 +- **VLAN** - CRUD for VLAN sub-interface management. + - Create VLAN sub-interface over a physical interface. + - Toggle interface state separately for super-interface and sub-interface. + - Configure IP address and bridge domain assignment on sub-interface. 
+ - Configure VLAN tag rewrite on sub-interface.
+ - Test case count: 17
+- **ACL** - CRD for low-level classifiers: table and session management,
+ - interface assignment.
+ - Configure up to 2 classify tables.
+ - Configure up to 2 classify sessions on one table.
+ - Assign classify session to a physical interface.
+ - Remove tables, sessions, interface assignments.
+ - Test case count: 9
+- **PBB** - CRD for provider backbone bridge sub-interface.
+ - Configure, modify and remove a PBB sub-interface over a physical interface.
+ - Test case count: 9
+- **NSH_SFC** - CRD for NSH maps and entries, using NSH_SFC plugin.
+ - Configure up to 2 NSH entries.
+ - Configure up to 2 NSH maps.
+ - Modify and delete NSH maps and entries.
+ - Test case count: 8
+- **LISP** - CRD for Lisp: mapping, locator set, adjacency, map resolver.
+ - Toggle Lisp feature status.
+ - Configure and delete Lisp mapping as local and remote.
+ - Configure and delete Lisp adjacency mapping.
+ - Configure and delete Lisp map resolver, proxy ITR.
+ - Test case count: 11
+- **NAT** - CRD for NAT entries, interface assignment.
+ - Configure and delete up to two NAT entries.
+ - Assign NAT entries to a physical interface.
+ - Test case count: 6
+- **Port mirroring** - CRD for SPAN port mirroring, interface assignment.
+ - Configure SPAN port mirroring on a physical interface, mirroring
+ - up to 2 interfaces.
+ - Remove SPAN configuration from interfaces.
+ - Test case count: 3
+- **ACL-PLUGIN** - CRD for high-level classifier.
+ - MAC + IP address classification.
+ - IPv4, IPv6 address classification.
+ - TCP, UDP, ICMP, ICMPv6 protocol/next-header classification.
+ - port number classification.
+ - ICMP, ICMPv6 code and type classification.
+ - Test case count: 15
+- **ProxyARP** - CRD for proxyARP feature.
+ - Configure proxyARP.
+ - Assign to interface.
+ - Test case count: 3
+- **ProxyND6** - CRD for Neighbor Discovery Proxy.
+ - Configure ProxyND6 feature on interface.
+ - Test case count: 4
+- **DHCP Relay** - CRD for DHCP relay feature.
+ - Configure DHCP Relays.
+ - IPv4 and IPv6 variants.
+ - Test case count: 4
+- **SLAAC** - CRD for Stateless Address AutoConfiguration.
+ - Configure SLAAC feature on interfaces.
+ - Test case count: 7
+- **Routing** - CRD for routing.
+ - Configure single-hop route.
+ - Configure multi-hop routes.
+ - Configure blackhole route.
+ - IPv4 and IPv6 variants.
+ - Test case count: 6
+- **Honeycomb Infrastructure** - configuration persistence,
+ - Netconf notifications for interface events,
+ - Netconf negative tests aimed at specific issues.
+
+In total, 158 Honeycomb tests are included in the CSIT |release|.
+
+Operational data in Honeycomb should mirror configuration data at all times.
+Because of this, test cases follow this general pattern:
+
+#. read operational data of the feature using restconf.
+#. read status of the feature using VPP API dump.
+#. modify configuration of the feature using restconf.
+#. verify changes to operational data using restconf.
+#. verify changes using VPP API dump, OR
+#. send a packet to the VPP node and observe behaviour to verify configuration.
+
+Test cases involving network interfaces utilize the first two interfaces on
+the DUT node.
+
+Functional Tests Naming
+-----------------------
+
+CSIT |release| introduced a common structured naming convention for all
+performance and functional tests. This change was driven by the substantially
+growing number and type of CSIT test cases. Firstly, the original practice did
+not always follow any strict naming convention.
Secondly, test names did not
+always clearly capture tested packet encapsulations, and the actual type or
+content of the tests. Thirdly, HW configurations in terms of NICs, ports and
+their locality were not captured either. These were but a few reasons that
+drove the decision to change and define a new, more complete and stricter
+test naming convention, and to apply it to all existing and new test cases.
+
+The new naming should be intuitive for the majority of the tests. The complete
+description of the CSIT test naming convention is provided on `CSIT test naming
+page `_.
+
+Here are a few illustrative examples of the new naming usage for functional
+test suites:
+
+#. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P**
+
+   - *eth2p-ethip4-ip4base-func.robot* => 2 ports of Ethernet, IPv4 baseline
+     routed forwarding, functional tests.
+
+#. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC,
+   P2V2P, NIC2VMchain2NIC, P2V2V2P**
+
+   - *eth2p-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-func.robot* => 2 ports of
+     Ethernet, IPv4 VXLAN Ethernet, L2 bridge-domain switching to/from two vhost
+     interfaces and one VM, functional tests.
diff --git a/docs/report/index.rst b/docs/report/index.rst
index f24e9e78c7..5ccc4b66c9 100644
--- a/docs/report/index.rst
+++ b/docs/report/index.rst
@@ -1,4 +1,4 @@
-CSIT 17.01
+CSIT 17.04
 ==========
 
 .. toctree::
diff --git a/docs/report/testpmd_performance_tests/csit_release_notes.rst b/docs/report/testpmd_performance_tests/csit_release_notes.rst
index 6d415a1add..ba78a1bc54 100644
--- a/docs/report/testpmd_performance_tests/csit_release_notes.rst
+++ b/docs/report/testpmd_performance_tests/csit_release_notes.rst
@@ -6,5 +6,5 @@ Changes in CSIT |release|
 
 #. Added Testpmd tests
 
-   - new NICs - Intel xl710
+   - new NICs - 2p40GE Intel xl710, 2p10GE Intel x710
 
diff --git a/docs/report/testpmd_performance_tests/packet_latency_graphs/l2.rst b/docs/report/testpmd_performance_tests/packet_latency_graphs/l2.rst
index bac622674c..ddb18f42d2 100644
--- a/docs/report/testpmd_performance_tests/packet_latency_graphs/l2.rst
+++ b/docs/report/testpmd_performance_tests/packet_latency_graphs/l2.rst
@@ -1,24 +1,33 @@
-L2 Ethernet Switching
-=====================
-
-This section provides a summary of Testpmd Phy-to-Phy L2 Ethernet looping
-performance illustrating packet latency measured at 50% of discovered NDR
-throughput rate. Latency is reported for Testpmd running in multiple
-configurations of Testpmd worker thread(s), a.k.a. Testpmd data plane thread
-(s), and their physical CPU core(s) placement.
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-latency test cases, *X-axis labels* are indeces of csit-dpdk-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured packet Latency [uSec] values, and the *graph
-legend* identifes the plotted test suites.
+L2 Ethernet Interface Loop
+==========================
+
+This section includes summary graphs of Testpmd Phy-to-Phy packet
+latency with L2 Ethernet Interface Loop measured at 50% of discovered
+NDR throughput rate. Latency is reported for Testpmd running in multiple
+configurations of Testpmd pmd thread(s), a.k.a. Testpmd data plane
+thread(s), and their physical CPU core(s) placement.
+
+Results are generated from a single execution of the CSIT NDR discovery
+test. Box plots are used to show the Minimum, Average and Maximum packet
+latency per test.
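To make the 50% load point concrete: latency is measured while offering traffic
at half of the NDR rate discovered for the same test case. A minimal sketch of
that arithmetic, with a hypothetical NDR value (not a result from this report):

.. code-block:: bash

    # Latency is reported at 50% of the discovered NDR throughput rate.
    ndr_pps=12700000              # hypothetical discovered NDR [pps]
    rate_pps=$(( ndr_pps / 2 ))   # offered load for the latency measurement
    echo "Latency measured at ${rate_pps} pps"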
+ +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-dpdk-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs - `csit-dpdk-perf-1704-all `_ , - ii) archived FD.io jobs test result output files - `csit-dpdk-perf-1704-all#job-number `_. + Test results have been generated by FD.io test executor jobs + `csit-dpdk-perf-1704-all + `_, + with Robot Framework result files csit-dpdk-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ Testpmd packet latency - running in configuration of **one worker thread (1t) on one physical core (1c)** - is presented in the figure below. @@ -34,10 +43,12 @@ git repository by filtering with specified regex as follows: .. code-block:: bash - $ csit/tests/perf - $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * + $ $CSIT/dpdk-tests/perf + $ grep -E "64B-1t1c-eth-l2xcbase-testpmd-ndrdisc" * - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc + 10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc + 40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc Testpmd packet latency - running in configuration of **two worker threads (2t) on two physical cores (2c)** - is presented in the figure below. @@ -53,8 +64,10 @@ git repository by filtering with specified regex as follows: .. code-block:: bash - $ csit/tests/perf - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * + $ $CSIT/dpdk-tests/perf + $ grep -E "64B-2t2c-eth-l2xcbase-testpmd-ndrdisc" * - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc + 10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc + 40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc diff --git a/docs/report/testpmd_performance_tests/packet_throughput_graphs/l2.rst b/docs/report/testpmd_performance_tests/packet_throughput_graphs/l2.rst index 0afb54792d..3601384c99 100644 --- a/docs/report/testpmd_performance_tests/packet_throughput_graphs/l2.rst +++ b/docs/report/testpmd_performance_tests/packet_throughput_graphs/l2.rst @@ -1,29 +1,38 @@ -L2 Ethernet Looping -=================== - -Following sections provide a summary of VPP Phy-to-Phy L2 Ethernet Looping -performance illustrating NDR throughput (zero packet loss) and PDR throughput -(<0.5% packet loss). Performance is reported for Testpmd running in multiple -Testpmd worker thread (a.k.a. VPP data plane thread) and physical CPU core -configurations. 
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-throughput test cases, *X-axis labels* are indeces of csit-dpdk-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph
-legend* identifes the plotted test suites.
+L2 Ethernet Interface Loop
+==========================
+
+The following sections include summary graphs of DPDK Testpmd Phy-to-Phy
+performance with L2 Ethernet Interface Loop, including NDR throughput (zero
+packet loss) and PDR throughput (<0.5% packet loss). Performance is reported
+for Testpmd running in multiple configurations of Testpmd pmd thread(s),
+a.k.a. Testpmd data plane thread(s), and their physical CPU core(s) placement.
+
+Results are generated by multiple executions of the same CSIT tests.
+In order to display variation in measured throughput values, Box-and-
+whisker plots are used to show their quartiles (Min, 1st quartile / 25th
+percentile, 2nd quartile / 50th percentile / median, 3rd quartile / 75th
+percentile, Max). Outliers are plotted as individual points.
+
+*Title of each graph* is a regex (regular expression) matching all
+throughput test cases plotted on this graph, *X-axis labels* are indices
+of individual test suites executed by csit-dpdk-perf-1704-all jobs that
+created result output files used as data sources for the graph, *Y-axis
+labels* are measured Packets Per Second [pps] values, and the *Graph
+legend* lists the plotted test suites and their indices.
 
 .. note::
 
-    Sources of test results data: i) FD.io executor jobs `csit-dpdk-perf-1704-all
-    `_ ,
-    ii) FD.io jobs archieved result files.
+    Test results have been generated by FD.io test executor jobs
+    `csit-dpdk-perf-1704-all
+    `_,
+    with Robot Framework result files csit-dpdk-perf-1704-all-.zip
+    `archived here <../../_static/archive/>`_.
 
 NDR Throughput
 ~~~~~~~~~~~~~~
 
-NDR throughput of Testpmd - running in configuration of **one worker thread
-(1t) on one physical core (1c)** - is presented in the figure below.
+Testpmd NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below.
 
 .. raw:: html
 
 
 *Figure 1. Testpmd 1thread 1core - NDR Throughput for Phy-to-Phy L2 Ethernet
 Looping.*
 
-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:
 
 .. code-block:: bash
 
-    $ csit/dpdk-tests/perf
-    $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" *
+    $ $CSIT/dpdk-tests/perf
+    $ grep -E "64B-1t1c-eth-l2xcbase-testpmd-ndrdisc" *
 
-    10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc
+    10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc
+    10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc
+    40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-testpmd-ndrdisc
 
-NDR throughput of Testpmd - running in configuration of **two worker threads
-(2t) on two physical cores (2c)** - is presented in the figure below.
+Testpmd NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below.
 
 .. raw:: html
 
 
 *Figure 2.
Testpmd 2threads 2cores - NDR Throughput for Phy-to-Phy L2 Ethernet Looping.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/dpdk-tests/perf - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * + $ $CSIT/dpdk-tests/perf + $ grep -E "64B-2t2c-eth-l2xcbase-testpmd-ndrdisc" * - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc + 10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc + 10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc + 40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-testpmd-ndrdisc PDR Throughput ~~~~~~~~~~~~~~ -PDR throughput of Testpmd - running in configuration of **one worker thread -(1t) on one physical core (1c)** - is presented in the figure below. PDR at -below 0.5% packet loss ratio. +Testpmd PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -76,18 +88,20 @@ below 0.5% packet loss ratio. *Figure 3. Testpmd 1thread 1core - PDR Throughput for Phy-to-Phy L2 Ethernet Looping.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash $ csit/tests/perf - $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" * + $ grep -E "64B-1t1c-eth-l2xcbase-testpmd-pdrdisc" * - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-pdrdisc + 10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-testpmd-pdrdisc + 10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-testpmd-pdrdisc + 40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-testpmd-pdrdisc -PDR throughput of Testpmd - running in configuration of **two worker threads -(2t) on two physical cores (2c)** - is presented in the figure below. +Testpmd PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -96,13 +110,15 @@ PDR throughput of Testpmd - running in configuration of **two worker threads *Figure 4. Testpmd 2thread 2core - PDR Throughput for Phy-to-Phy L2 Ethernet Looping.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash $ csit/tests/perf - $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" * + $ grep -E "64B-2t2c-eth-l2xcbase-testpmd-pdrdisc" * - 10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-pdrdisc + 10ge2p1x520-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-testpmd-pdrdisc + 10ge2p1x710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-testpmd-pdrdisc + 40ge2p1xl710-eth-l2xcbase-testpmd-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-testpmd-pdrdisc diff --git a/docs/report/vpp_functional_tests/csit_release_notes.rst b/docs/report/vpp_functional_tests/csit_release_notes.rst index c87cb46e3c..9732a98ffe 100644 --- a/docs/report/vpp_functional_tests/csit_release_notes.rst +++ b/docs/report/vpp_functional_tests/csit_release_notes.rst @@ -1,14 +1,14 @@ CSIT Release Notes ================== -Changes in CSIT |release| +Changes in CSIT |release| - to be updated ------------------------- #. Introduction of Centos7 tests #. Added VPP functional tests - - more VM vhost-user tests + - more VM vhost-user tests incl. two VM four vhost interface tests - more LISP tests - more IPSec crypto tests - IPv4 and IPv6 Equal-Cost Multi-Path routing tests @@ -16,10 +16,10 @@ Changes in CSIT |release| - IPFIX tests - SPAN tests -Known Issues +Known Issues - to be updated ------------ -Here is the list of known issues in CSIT |release| for VPP functional tests in VIRL: +Here is the list of known issues in CSIT |release| for VPP functional tests in VIRL: (to be updated) +---+-------------------------------------------------+-----------------------------------------------------------------+ | # | Issue | Description | @@ -44,11 +44,11 @@ Here is the list of known issues in CSIT |release| for VPP functional tests in V | | | Anyhow the correct IPv6 destination address is reported. | +---+-------------------------------------------------+-----------------------------------------------------------------+ | 6 | SPAN: Tx traffic is not mirrored | Tx traffic is not mirrored from SPAN source port to SPAN | -| | | destination port. |br| | +| | | destination port. | | | | NOTE: Fix is going to be merged in vpp stable/1701. | +---+-------------------------------------------------+-----------------------------------------------------------------+ | 7 | SPAN: Packet trace always contains local0 as | There is reported wrong destination port in the traffic trace: | -| | destination port | |br| SPAN: mirrored GigabitEthernet0/5/0 -> local0 |br| | +| | destination port | SPAN: mirrored GigabitEthernet0/5/0 -> local0 | | | | The (Rx) traffic is mirrored to correct destination port. | +---+-------------------------------------------------+-----------------------------------------------------------------+ diff --git a/docs/report/vpp_performance_tests/csit_release_notes.rst b/docs/report/vpp_performance_tests/csit_release_notes.rst index 4c942e19a4..193f685d10 100644 --- a/docs/report/vpp_performance_tests/csit_release_notes.rst +++ b/docs/report/vpp_performance_tests/csit_release_notes.rst @@ -1,7 +1,7 @@ CSIT Release Notes ================== -Changes in CSIT |release| +Changes in CSIT |release| - to be updated ------------------------- #. VPP performance test environment changes @@ -9,7 +9,7 @@ Changes in CSIT |release| - further VM and vhost-user test environment optimizations - Qemu virtio queue size increased from default value of 256 to 1024 - addition of HW cryptodev devices in all three LF FD.io physical testbeds -#. Added tests +#. 
Added tests (to be updated) - NICs @@ -29,7 +29,7 @@ Changes in CSIT |release| - L2 Bridge Domain switched-forwarding with vhost-user, VM and Intel x520 NIC -Performance Improvements +Performance Improvements - to be updated ------------------------ Substantial improvements in measured packet throughput, with relative increase @@ -42,68 +42,70 @@ NDR Throughput Non-Drop Rate Throughput discovery tests: -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | Relative Improvement | -+===================+=================================================================+===========+=================+======================+ -| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.4 Mpps | 12.7 Mpps | 35% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2XC | 10ge2p1xl710: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.5 Mpps | 12.2..12.4 Mpps | 28..30% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-ndrdisc | 7.4 Mpps | 8.8..9.0 Mpps | 19..23% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-ndrdisc | 7.5 Mpps | 8.8..9.0 Mpps | 17..20% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc | 5.4 Mpps | 6.5 Mpps | 20% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc | 0.5 Mpps | 2.8..3.2 Mpps | 460..540% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-ndrdisc | 7.8 Mpps | 10.4..10.6 Mpps | 33..36% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc | 0.4 Mpps | 2.7..2.8 Mpps | 575..600% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ndrdisc | 8.7 Mpps | 9.7 Mpps | 12% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc | 7.1 Mpps | 8.3..8.5 Mpps | 17..20% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc | 6.9 Mpps | 7.6..7.8 Mpps | 10..13% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc | 0.3 Mpps | 
2.6 Mpps | 767% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-ndrdisc | 3.0 Mpps | 7.3..7.7 Mpps | 143..157% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | 17.01 to 17.04 |
+| | | [Mpps] | [Mpps] | [Mpps] | Improvement |
++===================+=================================================================+============+===========+===========+================+
+| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.4 | 12.7 | 13.4 | 6% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2XC | 10ge2p1xl710: 64B-1t1c-eth-l2xcbase-ndrdisc | 9.5 | 12.2 | 12.4 | 2% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-ndrdisc | 7.4 | 8.8 | 9.3 | 6% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-ndrdisc | 7.5 | 8.8 | 9.2 | 5% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc | 5.4 | 6.5 | 6.8 | 5% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc | 0.5 | 2.8 | 3.2 | 14% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-ndrdisc | 7.8 | 10.4 | 10.8 | 4% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc | 0.4 | 2.7 | 3.4 | 26% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ndrdisc | 8.7 | 9.7 | 10.6 | 9% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc | 7.1 | 8.3 | 9.0 | 8% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc | 6.9 | 7.6 | 8.3 | 9% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc | 0.3 | 2.6 | 3.1 | 19% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
+| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-ndrdisc | 3.0 | 7.3 | 8.5 | 16% |
++-------------------+-----------------------------------------------------------------+------------+-----------+-----------+----------------+
 
 PDR Throughput
 ~~~~~~~~~~~~~~
 
 Partial Drop Rate throughput discovery tests with packet Loss Tolerance of 0.5%:
 
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | Relative Improvement |
-+===================+=================================================================+===========+=================+======================+
-| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-pdrdisc | 9.4 Mpps | 12.7..12.9 Mpps | 35..37% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-pdrdisc | 7.4 Mpps | 8.8..9.1 Mpps | 19..23% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-pdrdisc | 7.5 Mpps | 8.8..9.0 Mpps | 17..20% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc | 5.4 Mpps | 6.5 Mpps | 20% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc | 2.6 Mpps | 3.2..3.3 Mpps | 23..26% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-pdrdisc | 7.8 Mpps | 10.6 Mpps | 36% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc | 2.1 Mpps | 2.9 Mpps | 38% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-pdrdisc | 8.7 Mpps | 9.7 Mpps | 11% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc | 7.1 Mpps | 8.3..8.5 Mpps | 17..20% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc | 2.0 Mpps | 2.7 Mpps | 35% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | Relative |
+| | | [Mpps] | [Mpps] | [Mpps] | Improvement |
++===================+=================================================================+===========+===========+===========+=============+ +| L2XC | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-pdrdisc | 9.4 | 12.7 | 13.4 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2XC dot1ad | 10ge2p1x520: 64B-1t1c-dot1ad-l2xcbase-pdrdisc | 7.4 | 8.8 | 9.3 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2XC dot1q | 10ge2p1x520: 64B-1t1c-dot1q-l2xcbase-pdrdisc | 7.5 | 8.8 | 9.5 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2XC VxLAN | 10ge2p1x520: 64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc | 5.4 | 6.5 | 6.8 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2XC-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc | 2.6 | 3.2 | 3.2 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2BD | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-pdrdisc | 7.8 | 10.6 | 11.1 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| L2BD-vhost-VM | 10ge2p1x520: 64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc | 2.1 | 2.9 | 3.2 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| IPv4 | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-pdrdisc | 8.7 | 9.7 | 10.6 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| IPv4 COP | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc | 7.1 | 8.3 | 9.2 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ +| IPv4 vhost | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc | 2.0 | 2.7 | 3.2 | % | ++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+ Measured improvements are in line with VPP code optimizations listed in -`VPP-17.01 release notes -`_. +`VPP-17.04 release notes +`_. Additionally, vhost-VM performance improvements are due to both VPP code optimizations as well as due to the FD.io CSIT Linux KVM test environment @@ -111,7 +113,7 @@ optimizations for vhost-VM tests - see section "2.1.7. Methodology: KVM VM vhost". 
-Other Performance Changes +Other Performance Changes - to be updated ------------------------- Other changes in measured packet throughput, with either minor relative @@ -124,67 +126,69 @@ NDR Throughput Non-Drop Rate Throughput discovery tests: -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | Relative Change | -+===================+=================================================================+===========+=================+======================+ -| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-ndrdisc | 8.5 Mpps | 9.0 Mpps | 6% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-ndrdisc | 8.5 Mpps | 9.0..9.2 Mpps | 6..8% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-ndrdisc | 8.5 Mpps | 7.8..8.1 Mpps | -8..-5% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc | 6.9 Mpps | 7.4..7.6 Mpps | 7..10% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-ndrdisc | 4.4 Mpps | 4.8 Mpps | 9% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc | 6.1 Mpps | 6.1..6.5 Mpps | 0..7% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-ndrdisc | 6.5 Mpps | 5.3..5.7 Mpps | -18..-12% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-ndrdisc | 6.9 Mpps | 6.5 Mpps | -6% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-ndrdisc | 5.3 Mpps | 4.2 Mpps | -21% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ -| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc | 6.5 Mpps | 6.1..6.5 Mpps | -6..0% | -+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+ ++-------------------+-----------------------------------------------------------------+-----------+-----------+-------------------------+ +| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | Relative | +| | | [Mpps] | [Mpps] | [Mpps] | Change | ++===================+=================================================================+===========+===========+===========+=============+ +| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-ndrdisc | 8.5 | 9.0 
| | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-ndrdisc | 8.5 | 9.0 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-ndrdisc | 8.5 | 7.8 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc | 6.9 | 7.4 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-ndrdisc | 4.4 | 4.8 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc | 6.1 | 6.1 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-ndrdisc | 6.5 | 5.3 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-ndrdisc | 6.9 | 6.5 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-ndrdisc | 5.3 | 4.2 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc | 6.5 | 6.1 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
 
 PDR Throughput
 ~~~~~~~~~~~~~~
 
 Partial Drop Rate throughput discovery tests with packet Loss Tolerance of 0.5%:
 
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | Relative Change |
-+===================+=================================================================+===========+=================+======================+
-| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-pdrdisc | 8.5 Mpps | 9.0 Mpps | 6% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-pdrdisc | 8.5 Mpps | 9.0..9.2 Mpps | 6..8% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-pdrdisc | 8.3 Mpps | 8.1 Mpps | -2% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc | 7.1 Mpps | 7.6..7.8 Mpps | 7..10% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc | 7.1 Mpps | 7.4..7.6 Mpps | 4..7% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-pdrdisc | 4.6 Mpps | 4.8 Mpps | 9% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-pdrdisc | 7.7 Mpps | 7.3..7.7 Mpps | -5..0% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc | 6.1 Mpps | 6.1..6.5 Mpps | 0..7% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-pdrdisc | 6.9 Mpps | 5.3..5.7 Mpps | -23..-17% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-pdrdisc | 6.9 Mpps | 6.5 Mpps | -6% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-pdrdisc | 5.3 Mpps | 4.2 Mpps | -21% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
-| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc | 6.5 Mpps | 6.1..6.5 Mpps | -6..0% |
-+-------------------+-----------------------------------------------------------------+-----------+-----------------+----------------------+
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| VPP Functionality | Test Name | VPP-16.09 | VPP-17.01 | VPP-17.04 | Relative |
+| | | [Mpps] | [Mpps] | [Mpps] | Change |
++===================+=================================================================+===========+===========+===========+=============+
+| IPv4 FIB 200k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale200k-pdrdisc | 8.5 | 9.0 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 FIB 20k | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale20k-pdrdisc | 8.5 | 9.0 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 FIB 2M | 10ge2p1x520: 64B-1t1c-ethip4-ip4scale2m-pdrdisc | 8.3 | 8.1 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 iAcl | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc | 7.1 | 7.6 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 Policer | 10ge2p1x520: 64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc | 7.1 | 7.4 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv4 LISP | 10ge2p1x520: 64B-1t1c-ethip4lispip4-ip4base-pdrdisc | 4.6 | 4.8 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-pdrdisc | 7.7 | 7.3 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 COP | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc | 6.1 | 6.1 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 200k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale200k-pdrdisc | 6.9 | 5.3 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 20k | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale20k-pdrdisc | 6.9 | 6.5 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 FIB 2M | 10ge2p1x520: 78B-1t1c-ethip6-ip6scale2m-pdrdisc | 5.3 | 4.2 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+| IPv6 iAcl | 10ge2p1x520: 78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc | 6.5 | 6.1 | | % |
++-------------------+-----------------------------------------------------------------+-----------+-----------+-----------+-------------+
+
+Known Issues - to be updated
+----------------------------

-Here is the list of known issues in CSIT |release| for VPP performance tests:
+Here is the list of known issues in CSIT |release| for VPP performance tests: (to be updated)

+---+-------------------------------------------------+-----------------------------------------------------------------+
| # | Issue | Description |
+===+=================================================+=================================================================+
@@ -192,9 +196,6 @@ Here is the list of known issues in CSIT |release| for VPP performance tests:
| 1 | Sporadic IPv4 routed-forwarding NDR discovery | Suspected issue with DPDK IPv4 checksum calculation. |
| | test failures for 1518B frame size | Observed frequency: sporadic, ca. 20% to 30% of test runs. |
+---+-------------------------------------------------+-----------------------------------------------------------------+
-| 2 | Vic1385 and Vic1227 NICs jumbo frames test | Suspected issue with VIC DPDK drivers that do not support jumbo |
-| | failures (9000B) | frames (dropped rx-miss). Observed frequency: 100%. |
-+---+-------------------------------------------------+-----------------------------------------------------------------+
| 3 | Vic1385 and Vic1227 performance | Low performance of NDR results. Big difference between NDR and |
| | | PDR. |
+---+-------------------------------------------------+-----------------------------------------------------------------+

diff --git a/docs/report/vpp_performance_tests/overview.rst b/docs/report/vpp_performance_tests/overview.rst
index 0a5525a6d8..dbc1612f84 100644
--- a/docs/report/vpp_performance_tests/overview.rst
+++ b/docs/report/vpp_performance_tests/overview.rst
@@ -1,320 +1,329 @@
-Overview
-========
-
-Tested Physical Topologies
---------------------------
-
-CSIT VPP performance tests are executed on physical baremetal servers hosted by LF
-FD.io project.
Testbed physical topology is shown in the figure below. - -:: - - +------------------------+ +------------------------+ - | | | | - | +------------------+ | | +------------------+ | - | | | | | | | | - | | <-----------------> | | - | | DUT1 | | | | DUT2 | | - | +--^---------------+ | | +---------------^--+ | - | | | | | | - | | SUT1 | | SUT2 | | - +------------------------+ +------------------^-----+ - | | - | | - | +-----------+ | - | | | | - +------------------> TG <------------------+ - | | - +-----------+ - -SUT1 and SUT2 are two System Under Test servers (Cisco UCS C240, each with two -Intel XEON CPUs), TG is a Traffic Generator (TG, another Cisco UCS C240, with -two Intel XEON CPUs). SUTs run VPP SW application in Linux user-mode as a -Device Under Test (DUT). TG runs TRex SW application as a packet Traffic -Generator. Physical connectivity between SUTs and to TG is provided using -different NIC models that need to be tested for performance. Currently -installed and tested NIC models include: - -#. 2port10GE X520-DA2 Intel. -#. 2port10GE X710 Intel. -#. 2port10GE VIC1227 Cisco. -#. 2port40GE VIC1385 Cisco. -#. 2port40GE XL710 Intel. - -From SUT and DUT perspective, all performance tests involve forwarding packets -between two physical Ethernet ports (10GE or 40GE). Due to the number of -listed NIC models tested and available PCI slot capacity in SUT servers, in -all of the above cases both physical ports are located on the same NIC. In -some test cases this results in measured packet throughput being limited not -by VPP DUT but by either the physical interface or the NIC capacity. - -Going forward CSIT project will be looking to add more hardware into FD.io -performance labs to address larger scale multi-interface and multi-NIC -performance testing scenarios. - -For test cases that require DUT (VPP) to communicate with VM(s) over vhost-user -interfaces, N of VM instances are created on SUT1 and SUT2. For N=1 DUT (VPP) forwards packets between vhostuser and physical interfaces. For N>1 DUT (VPP) a logical service chain forwarding topology is created on DUT (VPP) by applying L2 or IPv4/IPv6 configuration depending on the test suite. -DUT (VPP) test topology with N VM instances -is shown in the figure below including applicable packet flow thru the DUTs and VMs -(marked in the figure with ``***``). - -:: - - +-------------------------+ +-------------------------+ - | +---------+ +---------+ | | +---------+ +---------+ | - | | VM[1] | | VM[N] | | | | VM[1] | | VM[N] | | - | | ***** | | ***** | | | | ***** | | ***** | | - | +--^---^--+ +--^---^--+ | | +--^---^--+ +--^---^--+ | - | *| |* *| |* | | *| |* *| |* | - | +--v---v-------v---v--+ | | +--v---v-------v---v--+ | - | | * * * * |*|***********|*| * * * * | | - | | * ********* ***<-|-----------|->*** ********* * | | - | | * DUT1 | | | | DUT2 * | | - | +--^------------------+ | | +------------------^--+ | - | *| | | |* | - | *| SUT1 | | SUT2 |* | - +-------------------------+ +-------------------------+ - *| |* - *| |* - *| +-----------+ |* - *| | | |* - *+--------------------> TG <--------------------+* - **********************| |********************** - +-----------+ - -For VM tests, packets are switched by DUT (VPP) multiple times: twice for a single VM, three times for two VMs, N+1 times for N VMs. -Hence the external -throughput rates measured by TG and listed in this report must be multiplied -by (N+1) to represent the actual DUT aggregate packet forwarding rate. 
- -CSIT |release| - -Note that reported VPP performance results are specific to the SUTs tested. -Current LF FD.io SUTs are based on Intel XEON E5-2699v3 2.3GHz CPUs. SUTs with -other CPUs are likely to yield different results. A good rule of thumb, that -can be applied to estimate VPP packet thoughput for Phy-to-Phy (NIC-to-NIC, -PCI-to-PCI) topology, is to expect the forwarding performance to be -proportional to CPU core frequency, assuming CPU is the only limiting factor -and all other SUT parameters equivalent to FD.io CSIT environment. The same rule of -thumb can be also applied for Phy-to-VM-to-Phy (NIC-to-VM-to-NIC) topology, -but due to much higher dependency on intensive memory operations and -sensitivity to Linux kernel scheduler settings and behaviour, this estimation -may not always yield good enough accuracy. - -For detailed LF FD.io test bed specification and physical topology please refer to `LF FDio CSIT testbed wiki page `_. - -Performance Tests Coverage --------------------------- - -Performance tests are split into the two main categories: - -- Throughput discovery - discovery of packet forwarding rate using binary search - in accordance to RFC2544. - - - NDR - discovery of Non Drop Rate packet throughput, at zero packet loss; - followed by one-way packet latency measurements at 10%, 50% and 100% of - discovered NDR throughput. - - PDR - discovery of Partial Drop Rate, with specified non-zero packet loss - currently set to 0.5%; followed by one-way packet latency measurements at - 100% of discovered PDR throughput. - -- Throughput verification - verification of packet forwarding rate against - previously discovered throughput rate. These tests are currently done against - 0.9 of reference NDR, with reference rates updated periodically. - -CSIT |release| includes following performance test suites, listed per NIC type: - -- 2port10GE X520-DA2 Intel - - - **L2XC** - L2 Cross-Connect switched-forwarding of untagged, dot1q, dot1ad - VLAN tagged Ethernet frames. - - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames - with MAC learning; disabled MAC learning i.e. static MAC tests to be added. - - **IPv4** - IPv4 routed-forwarding. - - **IPv6** - IPv6 routed-forwarding. - - **IPv4 Scale** - IPv4 routed-forwarding with 20k, 200k and 2M FIB entries. - - **IPv6 Scale** - IPv6 routed-forwarding with 20k, 200k and 2M FIB entries. - - **VM with vhost-user** - switching between NIC ports and VM over vhost-user - interfaces in different switching modes incl. L2 Cross-Connect, L2 - Bridge-Domain, VXLAN with L2BD, IPv4 routed-forwarding. - - **COP** - IPv4 and IPv6 routed-forwarding with COP address security. - - **iACL** - IPv4 and IPv6 routed-forwarding with iACL address security. - - **LISP** - LISP overlay tunneling for IPv4-over-IPV4, IPv6-over-IPv4, - IPv6-over-IPv6, IPv4-over-IPv6 in IPv4 and IPv6 routed-forwarding modes. - - **VXLAN** - VXLAN overlay tunnelling integration with L2XC and L2BD. - - **QoS Policer** - ingress packet rate measuring, marking and limiting - (IPv4). - -- 2port40GE XL710 Intel - - - **L2XC** - L2 Cross-Connect switched-forwarding of untagged Ethernet frames. - - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames - with MAC learning. - - **IPv4** - IPv4 routed-forwarding. - - **IPv6** - IPv6 routed-forwarding. - - **VM with vhost-user** - switching between NIC ports and VM over vhost-user - interfaces in different switching modes incl. L2 Bridge-Domain. 
- -- 2port10GE X710 Intel - - - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames - with MAC learning. - - **VM with vhost-user** - switching between NIC ports and VM over vhost-user - interfaces in different switching modes incl. L2 Bridge-Domain. - -- 2port10GE VIC1227 Cisco - - - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames - with MAC learning. - -- 2port40GE VIC1385 Cisco - - - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames - with MAC learning. - -Execution of performance tests takes time, especially the throughput discovery -tests. Due to limited HW testbed resources available within FD.io labs hosted -by Linux Foundation, the number of tests for NICs other than X520 (a.k.a. -Niantic) has been limited to few baseline tests. Over time we expect the HW -testbed resources to grow, and will be adding complete set of performance -tests for all models of hardware to be executed regularly and(or) -continuously. - -Performance Tests Naming ------------------------- - -CSIT |release| follows a common structured naming convention for all -performance and system functional tests, introduced in CSIT rls1701. - -The naming should be intuitive for majority of the tests. Complete -description of CSIT test naming convention is provided on `CSIT test naming wiki -`_. - -Here few illustrative examples of the new naming usage for performance test -suites: - -#. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P** - - - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType* - - *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on - Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching - with MAC learning, NDR throughput discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE - on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline - switching with MAC learning, NDR throughput discovery. - - *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel - x520 NIC, IPv4 baseline routed forwarding, NDR throughput discovery. - - *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on - Intel x520 NIC, IPv6 scaled up routed forwarding, NDR throughput - discovery. - -#. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC, - P2V2P, NIC2VMchain2NIC, P2V2V2P** - - - *PortNICConfig-WireEncapsulation-PacketForwardingFunction- - PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation- - VirtPortConfig-VMconfig-TestType* - - *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports - of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain - switching to/from two vhost interfaces and one VM, NDR throughput - discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from two vhost interfaces and one VM, NDR throughput - discovery. - - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2 - ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain - switching to/from four vhost interfaces and two VMs, NDR throughput - discovery. 
- -Methodology: Multi-Thread and Multi-Core ----------------------------------------- - -**HyperThreading** - CSIT |release| performance tests are executed with SUT -servers' Intel XEON CPUs configured in HyperThreading Disabled mode (BIOS -settings). This is the simplest configuration used to establish baseline -single-thread single-core SW packet processing and forwarding performance. -Subsequent releases of CSIT will add performance tests with Intel -HyperThreading Enabled (requires BIOS settings change and hard reboot). - -**Multi-core Test** - CSIT |release| multi-core tests are executed in the -following VPP thread and core configurations: - -#. 1t1c - 1 VPP worker thread on 1 CPU physical core. -#. 2t2c - 2 VPP worker threads on 2 CPU physical cores. - -Note that in quite a few test cases running VPP on 2 physical cores hits -the tested NIC I/O bandwidth or packets-per-second limit. - -Methodology: Packet Throughput ------------------------------- - -Following values are measured and reported for packet throughput tests: - -- NDR binary search per RFC2544: - - - Packet rate: "RATE: pps - (2x )" - - Aggregate bandwidth: "BANDWIDTH: Gbps (untagged)" - -- PDR binary search per RFC2544: - - - Packet rate: "RATE: pps (2x - )" - - Aggregate bandwidth: "BANDWIDTH: Gbps (untagged)" - - Packet loss tolerance: "LOSS_ACCEPTANCE "" - -- NDR and PDR are measured for the following L2 frame sizes: - - - IPv4: 64B, IMIX_v4_1 (28x64B,16x570B,4x1518B), 1518B, 9000B. - - IPv6: 78B, 1518B, 9000B. - - -Methodology: Packet Latency ---------------------------- - -TRex Traffic Generator (TG) is used for measuring latency of VPP DUTs. Reported -latency values are measured using following methodology: - -- Latency tests are performed at 10%, 50% of discovered NDR rate (non drop rate) - for each NDR throughput test and packet size (except IMIX). -- TG sends dedicated latency streams, one per direction, each at the rate of - 10kpps at the prescribed packet size; these are sent in addition to the main - load streams. -- TG reports min/avg/max latency values per stream direction, hence two sets - of latency values are reported per test case; future release of TRex is - expected to report latency percentiles. -- Reported latency values are aggregate across two SUTs due to three node - topology used for all performance tests; for per SUT latency, reported value - should be divided by two. -- 1usec is the measurement accuracy advertised by TRex TG for the setup used in - FD.io labs used by CSIT project. -- TRex setup introduces an always-on error of about 2*2usec per latency flow - - additonal Tx/Rx interface latency induced by TRex SW writing and reading - packet timestamps on CPU cores without HW acceleration on NICs closer to the - interface line. - - -Methodology: KVM VM vhost -------------------------- - -CSIT |release| introduced environment configuration changes to KVM Qemu vhost- -user tests in order to more representatively measure VPP-17.01 performance in -configurations with vhost-user interfaces and VMs. - -Current setup of CSIT FD.io performance lab is using tuned settings for more -optimal performance of KVM Qemu: - -- Default Qemu virtio queue size of 256 descriptors. -- Adjusted Linux kernel CFS scheduler settings, as detailed on this CSIT wiki - page: https://wiki.fd.io/view/CSIT/csit-perf-env-tuning-ubuntu1604. 
-
-Adjusted Linux kernel CFS settings make the NDR and PDR throughput performance
-of VPP+VM system less sensitive to other Linux OS system tasks by reducing
-their interference on CPU cores that are designated for critical software
-tasks under test, namely VPP worker threads in host and Testpmd threads in
-guest dealing with data plan.
+Overview
+========
+
+Tested Physical Topologies
+--------------------------
+
+CSIT VPP performance tests are executed on physical baremetal servers hosted by
+the LF FD.io project. Testbed physical topology is shown in the figure below.
+
+::
+
+    +------------------------+ +------------------------+
+    | | | |
+    | +------------------+ | | +------------------+ |
+    | | | | | | | |
+    | | <-----------------> | |
+    | | DUT1 | | | | DUT2 | |
+    | +--^---------------+ | | +---------------^--+ |
+    | | | | | |
+    | | SUT1 | | SUT2 | |
+    +------------------------+ +------------------^-----+
+    | |
+    | |
+    | +-----------+ |
+    | | | |
+    +------------------> TG <------------------+
+    | |
+    +-----------+
+
+SUT1 and SUT2 are two System Under Test servers (Cisco UCS C240, each with two
+Intel XEON CPUs), TG is a Traffic Generator (TG, another Cisco UCS C240, with
+two Intel XEON CPUs). SUTs run VPP SW application in Linux user-mode as a
+Device Under Test (DUT). TG runs TRex SW application as a packet Traffic
+Generator. Physical connectivity between SUTs and to TG is provided using
+different NIC models that need to be tested for performance. Currently
+installed and tested NIC models include:
+
+#. 2port10GE X520-DA2 Intel.
+#. 2port10GE X710 Intel.
+#. 2port10GE VIC1227 Cisco.
+#. 2port40GE VIC1385 Cisco.
+#. 2port40GE XL710 Intel.
+
+From SUT and DUT perspective, all performance tests involve forwarding packets
+between two physical Ethernet ports (10GE or 40GE). Due to the number of
+listed NIC models tested and available PCI slot capacity in SUT servers, in
+all of the above cases both physical ports are located on the same NIC. In
+some test cases this results in measured packet throughput being limited not
+by VPP DUT but by either the physical interface or the NIC capacity.
+
+Going forward, the CSIT project will be looking to add more hardware into FD.io
+performance labs to address larger scale multi-interface and multi-NIC
+performance testing scenarios.
+
+For test cases that require DUT (VPP) to communicate with VM(s) over vhost-user
+interfaces, N VM instances are created on SUT1 and SUT2. For N=1, DUT (VPP)
+forwards packets between vhost-user and physical interfaces. For N>1, a logical
+service chain forwarding topology is created on DUT (VPP) by applying L2 or
+IPv4/IPv6 configuration depending on the test suite. DUT (VPP) test topology
+with N VM instances is shown in the figure below, including the applicable
+packet flow thru the DUTs and VMs (marked in the figure with ``***``).
+
+::
+
+    +-------------------------+ +-------------------------+
+    | +---------+ +---------+ | | +---------+ +---------+ |
+    | | VM[1] | | VM[N] | | | | VM[1] | | VM[N] | |
+    | | ***** | | ***** | | | | ***** | | ***** | |
+    | +--^---^--+ +--^---^--+ | | +--^---^--+ +--^---^--+ |
+    | *| |* *| |* | | *| |* *| |* |
+    | +--v---v-------v---v--+ | | +--v---v-------v---v--+ |
+    | | * * * * |*|***********|*| * * * * | |
+    | | * ********* ***<-|-----------|->*** ********* * | |
+    | | * DUT1 | | | | DUT2 * | |
+    | +--^------------------+ | | +------------------^--+ |
+    | *| | | |* |
+    | *| SUT1 | | SUT2 |* |
+    +-------------------------+ +-------------------------+
+    *| |*
+    *| |*
+    *| +-----------+ |*
+    *| | | |*
+    *+--------------------> TG <--------------------+*
+    **********************| |**********************
+    +-----------+
+
+For VM tests, packets are switched by DUT (VPP) multiple times: twice for a
+single VM, three times for two VMs, N+1 times for N VMs. Hence the external
+throughput rates measured by TG and listed in this report must be multiplied
+by (N+1) to represent the actual DUT aggregate packet forwarding rate.
+
+Note that reported VPP performance results are specific to the SUTs tested.
+Current LF FD.io SUTs are based on Intel XEON E5-2699v3 2.3GHz CPUs. SUTs with
+other CPUs are likely to yield different results. A good rule of thumb that
+can be applied to estimate VPP packet throughput for Phy-to-Phy (NIC-to-NIC,
+PCI-to-PCI) topology is to expect the forwarding performance to be
+proportional to CPU core frequency, assuming CPU is the only limiting factor
+and all other SUT parameters are equivalent to the FD.io CSIT environment. The
+same rule of thumb can also be applied for Phy-to-VM-to-Phy (NIC-to-VM-to-NIC)
+topology, but due to much higher dependency on intensive memory operations and
+sensitivity to Linux kernel scheduler settings and behaviour, this estimation
+may not always yield good enough accuracy.
+
+For detailed LF FD.io test bed specification and physical topology please refer
+to `LF FDio CSIT testbed wiki page `_.
+
+Performance Tests Coverage
+--------------------------
+
+Performance tests are split into two main categories:
+
+- Throughput discovery - discovery of packet forwarding rate using binary search
+  in accordance with RFC2544; a simplified sketch of the search loop is shown
+  after this list.
+
+  - NDR - discovery of Non Drop Rate packet throughput, at zero packet loss;
+    followed by one-way packet latency measurements at 10%, 50% and 100% of
+    discovered NDR throughput.
+  - PDR - discovery of Partial Drop Rate, with specified non-zero packet loss
+    currently set to 0.5%; followed by one-way packet latency measurements at
+    100% of discovered PDR throughput.
+
+- Throughput verification - verification of packet forwarding rate against
+  previously discovered throughput rate. These tests are currently done against
+  0.9 of reference NDR, with reference rates updated periodically.
+
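+A minimal sketch of the binary search logic, assuming a hypothetical
+``measure_loss_pct`` helper that runs a single traffic trial at a given rate
+and prints the observed packet loss in percent (the production search logic
+lives in the csit repository's Python libraries):
+
+.. code-block:: bash
+
+    lo=0            # highest passing rate found so far [pps, per direction]
+    hi=14880952     # upper bound: 64B frame line rate of a 10GE port [pps]
+    while (( hi - lo > 10000 )); do          # 10 kpps search resolution
+        mid=$(( (lo + hi) / 2 ))
+        loss=$(measure_loss_pct "$mid")      # hypothetical single trial
+        # NDR accepts zero loss only; a PDR search would compare against 0.5
+        if [ "$(echo "$loss == 0" | bc -l)" -eq 1 ]; then
+            lo=$mid                          # trial passed, search higher
+        else
+            hi=$mid                          # trial failed, search lower
+        fi
+    done
+    echo "NDR: ${lo} pps per direction"
+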
+CSIT |release| includes the following performance test suites, listed per NIC
+type:
+
+- 2port10GE X520-DA2 Intel
+
+  - **L2XC** - L2 Cross-Connect switched-forwarding of untagged, dot1q, dot1ad
+    VLAN tagged Ethernet frames.
+  - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames
+    with MAC learning; disabled MAC learning i.e. static MAC tests to be added.
+  - **IPv4** - IPv4 routed-forwarding.
+  - **IPv6** - IPv6 routed-forwarding.
+  - **IPv4 Scale** - IPv4 routed-forwarding with 20k, 200k and 2M FIB entries.
+  - **IPv6 Scale** - IPv6 routed-forwarding with 20k, 200k and 2M FIB entries.
+  - **VMs with vhost-user** - virtual topologies with 1 VM and service chains
+    of 2 VMs using vhost-user interfaces, with VPP forwarding modes incl. L2
+    Cross-Connect, L2 Bridge-Domain, VXLAN with L2BD, IPv4 routed-forwarding.
+  - **COP** - IPv4 and IPv6 routed-forwarding with COP address security.
+  - **iACL** - IPv4 and IPv6 routed-forwarding with iACL address security.
+  - **LISP** - LISP overlay tunneling for IPv4-over-IPv4, IPv6-over-IPv4,
+    IPv6-over-IPv6, IPv4-over-IPv6 in IPv4 and IPv6 routed-forwarding modes.
+  - **VXLAN** - VXLAN overlay tunnelling integration with L2XC and L2BD.
+  - **QoS Policer** - ingress packet rate measuring, marking and limiting
+    (IPv4).
+
+- 2port40GE XL710 Intel
+
+  - **L2XC** - L2 Cross-Connect switched-forwarding of untagged Ethernet frames.
+  - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames
+    with MAC learning.
+  - **IPv4** - IPv4 routed-forwarding.
+  - **IPv6** - IPv6 routed-forwarding.
+  - **VMs with vhost-user** - virtual topologies with 1 VM and service chains
+    of 2 VMs using vhost-user interfaces, with VPP forwarding modes incl. L2
+    Cross-Connect, L2 Bridge-Domain, VXLAN with L2BD, IPv4 routed-forwarding.
+  - **IPSec** - IPSec encryption with AES-GCM, CBC-SHA1 ciphers, in combination
+    with IPv4 routed-forwarding.
+  - **IPSec+LISP** - IPSec encryption with CBC-SHA1 ciphers, in combination
+    with LISP-GPE overlay tunneling for IPv4-over-IPv4.
+
+- 2port10GE X710 Intel
+
+  - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames
+    with MAC learning.
+  - **VMs with vhost-user** - virtual topologies with 1 VM using vhost-user
+    interfaces, with VPP forwarding modes incl. L2 Bridge-Domain.
+
+- 2port10GE VIC1227 Cisco
+
+  - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames
+    with MAC learning.
+
+- 2port40GE VIC1385 Cisco
+
+  - **L2BD** - L2 Bridge-Domain switched-forwarding of untagged Ethernet frames
+    with MAC learning.
+
+Execution of performance tests takes time, especially the throughput discovery
+tests. Due to limited HW testbed resources available within FD.io labs hosted
+by Linux Foundation, the number of tests for NICs other than X520 (a.k.a.
+Niantic) has been limited to a few baseline tests. Over time we expect the HW
+testbed resources to grow, and will be adding a complete set of performance
+tests for all models of hardware to be executed regularly and/or
+continuously.
+
+Performance Tests Naming
+------------------------
+
+CSIT |release| follows a common structured naming convention for all
+performance and system functional tests, introduced in CSIT rls1701.
+
+The naming should be intuitive for the majority of the tests. Complete
+description of CSIT test naming convention is provided on `CSIT test naming wiki
+`_.
+
+Here are a few illustrative examples of the new naming usage for performance
+test suites:
+
+#. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P**
+
+   - *PortNICConfig-WireEncapsulation-PacketForwardingFunction-
+     PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType*
+   - *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on
+     Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching
+     with MAC learning, NDR throughput discovery.
+   - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE
+     on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline
+     switching with MAC learning, NDR throughput verification.
+   - *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel
+     x520 NIC, IPv4 baseline routed forwarding, NDR throughput discovery.
+   - *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on
+     Intel x520 NIC, IPv6 scaled up routed forwarding, NDR throughput
+     discovery.
+
+#. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC,
+   P2V2P, NIC2VMchain2NIC, P2V2V2P**
+
+   - *PortNICConfig-WireEncapsulation-PacketForwardingFunction-
+     PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation-
+     VirtPortConfig-VMconfig-TestType*
+   - *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports
+     of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain
+     switching to/from two vhost interfaces and one VM, NDR throughput
+     discovery.
+   - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2
+     ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain
+     switching to/from two vhost interfaces and one VM, NDR throughput
+     discovery.
+   - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2
+     ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain
+     switching to/from four vhost interfaces and two VMs, NDR throughput
+     discovery.
+
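+The dash-separated fields map one-to-one onto the naming template; splitting
+the last suite name above makes this explicit (the field annotations after
+``#`` are added here for illustration, they are not part of the output):
+
+.. code-block:: bash
+
+    $ echo "10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc" \
+          | tr '-' '\n'
+    10ge2p1x520       # PortNICConfig: 2 ports of 10GE on Intel x520 NIC
+    ethip4vxlan       # WireEncapsulation: Ethernet, IPv4, VXLAN
+    l2bdbasemaclrn    # PacketForwardingFunction: L2BD baseline, MAC learning
+    eth               # VirtEncapsulation: untagged Ethernet to/from VM
+    4vhost            # VirtPortConfig: four vhost-user interfaces
+    2vm               # VMconfig: two VM instances
+    ndrdisc           # TestType: NDR throughput discovery
+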
+Methodology: Multi-Thread and Multi-Core
+----------------------------------------
+
+**HyperThreading** - CSIT |release| performance tests are executed with SUT
+servers' Intel XEON CPUs configured in HyperThreading Disabled mode (BIOS
+settings). This is the simplest configuration used to establish baseline
+single-thread single-core SW packet processing and forwarding performance.
+Subsequent releases of CSIT will add performance tests with Intel
+HyperThreading Enabled (requires BIOS settings change and hard reboot).
+
+**Multi-core Test** - CSIT |release| multi-core tests are executed in the
+following VPP thread and core configurations:
+
+#. 1t1c - 1 VPP worker thread on 1 CPU physical core.
+#. 2t2c - 2 VPP worker threads on 2 CPU physical cores.
+
+Note that in quite a few test cases running VPP on 2 physical cores hits
+the tested NIC I/O bandwidth or packets-per-second limit.
+
+Methodology: Packet Throughput
+------------------------------
+
+The following values are measured and reported for packet throughput tests:
+
+- NDR binary search per RFC2544:
+
+  - Packet rate: "RATE: <aggregate packet rate> pps
+    (2x <per direction packet rate>)"
+  - Aggregate bandwidth: "BANDWIDTH: <aggregate bandwidth> Gbps (untagged)"
+
+- PDR binary search per RFC2544:
+
+  - Packet rate: "RATE: <aggregate packet rate> pps (2x
+    <per direction packet rate>)"
+  - Aggregate bandwidth: "BANDWIDTH: <aggregate bandwidth> Gbps (untagged)"
+  - Packet loss tolerance: "LOSS_ACCEPTANCE <accepted percentage of packets
+    dropped at PDR rate>"
+
+- NDR and PDR are measured for the following L2 frame sizes:
+
+  - IPv4: 64B, IMIX_v4_1 (28x64B,16x570B,4x1518B), 1518B, 9000B.
+  - IPv6: 78B, 1518B, 9000B.
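+
+The average L2 frame size implied by the IMIX_v4_1 mix follows directly from
+its definition (28x 64B, 16x 570B and 4x 1518B frames, i.e. 48 frames total):
+
+.. code-block:: bash
+
+    $ echo "scale=2; (28*64 + 16*570 + 4*1518) / 48" | bc
+    353.83
+
+so IMIX_v4_1 results correspond to an average frame size of roughly 354B.
+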
+
+Methodology: Packet Latency
+---------------------------
+
+TRex Traffic Generator (TG) is used for measuring latency of VPP DUTs. Reported
+latency values are measured using the following methodology:
+
+- Latency tests are performed at 10%, 50% of discovered NDR rate (non drop rate)
+  for each NDR throughput test and packet size (except IMIX).
+- TG sends dedicated latency streams, one per direction, each at the rate of
+  10kpps at the prescribed packet size; these are sent in addition to the main
+  load streams.
+- TG reports min/avg/max latency values per stream direction, hence two sets
+  of latency values are reported per test case; future release of TRex is
+  expected to report latency percentiles.
+- Reported latency values are aggregate across two SUTs due to three node
+  topology used for all performance tests; for per-SUT latency, reported value
+  should be divided by two.
+- 1usec is the measurement accuracy advertised by TRex TG for the setup used in
+  FD.io labs by the CSIT project.
+- TRex setup introduces an always-on error of about 2*2usec per latency flow -
+  additional Tx/Rx interface latency induced by TRex SW writing and reading
+  packet timestamps on CPU cores without HW acceleration on NICs closer to the
+  interface line.
+
+
+Methodology: KVM VM vhost
+-------------------------
+
+CSIT |release| introduced environment configuration changes to KVM Qemu
+vhost-user tests in order to more representatively measure VPP-17.04
+performance in configurations with vhost-user interfaces and VMs.
+
+The current setup of the CSIT FD.io performance lab uses tuned settings for
+better performance of KVM Qemu:
+
+- Qemu virtio queue size has been increased from the default value of 256 to
+  1024 descriptors.
+- Adjusted Linux kernel CFS scheduler settings, as detailed on this CSIT wiki
+  page: https://wiki.fd.io/view/CSIT/csit-perf-env-tuning-ubuntu1604.
+
+Adjusted Linux kernel CFS settings make the NDR and PDR throughput performance
+of VPP+VM system less sensitive to other Linux OS system tasks by reducing
+their interference on CPU cores that are designated for critical software
+tasks under test, namely VPP worker threads in host and Testpmd threads in
+guest dealing with data plane.
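+
+For orientation, a QEMU invocation of the following shape attaches a VM to a
+VPP vhost-user socket with the enlarged 1024-descriptor virtio queues. This is
+an illustrative sketch only - the socket path, memory object naming and the
+elided arguments are assumptions, not the exact CSIT qemu command line:
+
+.. code-block:: bash
+
+    # vhost-user requires the guest RAM to be a shared memory backend
+    qemu-system-x86_64 -enable-kvm -m 2048 \
+        -object memory-backend-file,id=mem0,size=2048M,mem-path=/dev/hugepages,share=on \
+        -numa node,memdev=mem0 \
+        -chardev socket,id=chr0,path=/var/run/vpp/sock-1-1 \
+        -netdev type=vhost-user,id=net0,chardev=chr0 \
+        -device virtio-net-pci,netdev=net0,rx_queue_size=1024,tx_queue_size=1024 \
+        ...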
diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst
index 46aaa2a413..82f297ee92 100644
--- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst
+++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipsec.rst
@@ -1,42 +1,52 @@
-Crypto in hardware: IP4FWD, IP6FWD
-==================================
-
-This section provides a summary of VPP Phy-to-Phy IPSEC HW
-performance illustrating packet latency measured at 50% of discovered NDR
-throughput rate. Latency is reported for VPP running in multiple
-configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and
-their physical CPU core(s) placement.
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured packet Latency [uSec] values, and the *graph
-legend* identifes the plotted test suites.
+IPSec Crypto HW: IP4 Routed-Forwarding
+======================================
+
+This section includes summary graphs of VPP Phy-to-Phy packet latency
+with IPSec encryption used in combination with IPv4 routed-forwarding,
+with latency measured at 50% of discovered NDR throughput rate. VPP
+IPSec encryption is accelerated using DPDK cryptodev library driving
+Intel Quick Assist (QAT) crypto PCIe hardware cards. Latency is reported
+for VPP running in multiple configurations of VPP worker thread(s),
+a.k.a. VPP data plane thread(s), and their physical CPU core(s)
+placement.
+
+Results are generated from a single execution of CSIT NDR discovery
+test. Box plots are used to show the Minimum, Average and Maximum packet
+latency per test.
+
+*Title of each graph* is a regex (regular expression) matching all
+throughput test cases plotted on this graph, *X-axis labels* are indices
+of individual test suites executed by csit-vpp-perf-1704-all job that
+created result output file used as data source for the graph, *Y-axis
+labels* are measured packet Latency [uSec] values, and the *Graph
+legend* lists the plotted test suites and their indices. Latency is
+reported for concurrent symmetric bi-directional flows, separately for
+each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and
+ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1.

 .. note::

-    Data sources for reported test results: i) FD.io test executor jobs
+    Test results have been generated by FD.io test executor jobs
     `csit-vpp-perf-1704-all
-    `_ ,
-    ii) archived FD.io jobs test result `output files
-    <../../_static/archive/>`_.
+    `_,
+    with Robot Framework result files csit-vpp-perf-1704-all-.zip
+    `archived here <../../_static/archive/>`_

-VPP packet latency - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below.
+VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below.

 .. raw:: html

-*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPSEC HW.*
+*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPSec HW with IPv4 Routed-Forwarding.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

 .. code-block:: bash

-    $ csit/tests/perf
-    $ grep -E "64B-1t1c-.*ipsec*" *
+    $ cd $CSIT/tests/perf
+    $ grep -E "64B-1t1c-.*ipsec.*-ndrdisc" *

    40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc
    40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc
@@ -48,22 +58,22 @@ git repository by filtering with specified regex as follows:
    40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc
    40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc

-VPP packet latency - running in configuration of **two worker threads (2t) on two
-physical cores (2c)** - is presented in the figure below.
+
+VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below.

 .. raw:: html

-*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPSEC HW.*
+*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPSec HW with IPv4 Routed-Forwarding.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

 ..
code-block:: bash - $ csit/tests/perf - $ grep -E "64B-2t2c-.*ipsec*" * + $ cd $CSIT/tests/perf + $ grep -E "64B-2t2c-.*ipsec.*-ndrdisc" * 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst index 21d38e2afe..71bbf5c643 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4.rst @@ -1,28 +1,35 @@ IPv4 Routed-Forwarding ====================== -This section provides a summary of VPP Phy-to-Phy IPv4 Routed-Forwarding -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured packet Latency [uSec] values, and the *graph -legend* identifes the plotted test suites. +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv4 Routed-Forwarding measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated from a single execution of CSIT NDR discovery +test. Box plots are used to show the Minimum, Average and Maximum packet +latency per test. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ -VPP packet latency - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. .. raw:: html @@ -30,25 +37,25 @@ physical core (1c)** - is presented in the figure below. *Figure 1. 
VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc -VPP packet latency - running in configuration of **two worker threads (2t) on two -physical cores (2c)** - is presented in the figure below. + +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. .. raw:: html @@ -56,20 +63,20 @@ physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst index 9abcbd8bbf..42f5ced1cd 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv4_tunnels.rst @@ -1,28 +1,35 @@ IPv4 Overlay Tunnels ==================== -This section provides a summary of VPP Phy-to-Phy IPv4 Overlay Tunnels -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured packet Latency [uSec] values, and the *graph -legend* identifes the plotted test suites. +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv4 Overlay Tunnels measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated from a single execution of CSIT NDR discovery +test. Box plots are used to show the Minimum, Average and Maximum packet +latency per test. 
+ +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ -VPP packet latency - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. .. raw:: html @@ -30,12 +37,12 @@ physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc @@ -43,8 +50,7 @@ git repository by filtering with specified regex as follows: 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc -VPP packet latency - running in configuration of **two worker threads (2t) on two -physical cores (2c)** - is presented in the figure below. +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. .. raw:: html @@ -52,12 +58,12 @@ physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst index adcb514af0..7d4cd70885 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6.rst @@ -1,28 +1,35 @@ IPv6 Routed-Forwarding ====================== -This section provides a summary of VPP Phy-to-Phy IPv6 Routed-Forwarding -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. 
Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured packet Latency [uSec] values, and the *graph -legend* identifes the plotted test suites. +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv6 Routed-Forwarding measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated from a single execution of CSIT NDR discovery +test. Box plots are used to show the Minimum, Average and Maximum packet +latency per test. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ -VPP packet latency - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. .. raw:: html @@ -30,12 +37,12 @@ physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc @@ -46,8 +53,7 @@ git repository by filtering with specified regex as follows: 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc 40ge2p1xl710-ethip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc -VPP packet latency - running in configuration of **two worker threads (2t) on two -physical cores (2c)** - is presented in the figure below. +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. .. raw:: html @@ -55,12 +61,12 @@ physical cores (2c)** - is presented in the figure below. *Figure 2. 
VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst index 8861009728..c54509d3a3 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/ipv6_tunnels.rst @@ -1,28 +1,35 @@ IPv6 Overlay Tunnels ==================== -This section provides a summary of VPP Phy-to-Phy IPv6 Overlay Tunnels -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured packet Latency [uSec] values, and the *graph -legend* identifes the plotted test suites. +This section includes summary graphs of VPP Phy-to-Phy packet latency +with IPv6 Overlay Tunnels measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated from a single execution of CSIT NDR discovery +test. Box plots are used to show the Minimum, Average and Maximum packet +latency per test. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ -VPP packet latency - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. .. raw:: html @@ -30,19 +37,18 @@ physical core (1c)** - is presented in the figure below. *Figure 1. 
VPP 1thread 1core - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc -VPP packet latency - running in configuration of **two worker threads (2t) on two -physical cores (2c)** - is presented in the figure below. +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. .. raw:: html @@ -50,12 +56,12 @@ physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst index 7eba1062f2..f7836feeef 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/l2.rst @@ -1,28 +1,35 @@ L2 Ethernet Switching ===================== -This section provides a summary of VPP Phy-to-Phy L2 Ethernet switching -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured packet Latency [uSec] values, and the *graph -legend* identifes the plotted test suites. +This section includes summary graphs of VPP Phy-to-Phy packet latency +with L2 Ethernet switching measured at 50% of discovered NDR throughput +rate. Latency is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated from a single execution of CSIT NDR discovery +test. Box plots are used to show the Minimum, Average and Maximum packet +latency per test. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all job that +created result output file used as data source for the graph, *Y-axis +labels* are measured packet Latency [uSec] values, and the *Graph +legend* lists the plotted test suites and their indices. 
Latency is +reported for concurrent symmetric bi-directional flows, separately for +each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and +ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ -VPP packet latency - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. +VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below. .. raw:: html @@ -30,12 +37,12 @@ physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy L2 Ethernet Switching.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc @@ -48,8 +55,7 @@ git repository by filtering with specified regex as follows: 40ge2p1xl710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc 40ge2p1xl710-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc -VPP packet latency - running in configuration of **two worker threads (2t) on two -physical cores (2c)** - is presented in the figure below. +VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below. .. raw:: html @@ -57,12 +63,12 @@ physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy L2 Ethernet Switching.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * 10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst index 785bc13c3d..2e3071d6d8 100644 --- a/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst +++ b/docs/report/vpp_performance_tests/packet_latency_graphs/vm_vhost.rst @@ -1,28 +1,36 @@ VM vhost Connections ==================== -This section provides a summary of VPP Phy-to-VM-to-Phy VM vhost-user -performance illustrating packet latency measured at 50% of discovered NDR -throughput rate. Latency is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. 
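+The ``$CSIT`` path variable used in the command examples in these
+sections is an assumed convention rather than something the report
+defines; it is expected to point at a local clone of the CSIT
+repository, e.g.:
+
+.. code-block:: bash
+
+    $ git clone https://gerrit.fd.io/r/csit
+    $ export CSIT=$PWD/csit
+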
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-latency test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured packet Latency [uSec] values, and the *graph
-legend* identifes the plotted test suites.
+This section includes summary graphs of VPP Phy-to-VM(s)-to-Phy packet
+latency with VM virtio and VPP vhost-user virtual interfaces
+measured at 50% of discovered NDR throughput rate. Latency is reported
+for VPP running in multiple configurations of VPP worker thread(s),
+a.k.a. VPP data plane thread(s), and their physical CPU core(s)
+placement.
+
+Results are generated from a single execution of CSIT NDR discovery
+test. Box plots are used to show the Minimum, Average and Maximum packet
+latency per test.
+
+*Title of each graph* is a regex (regular expression) matching all
+throughput test cases plotted on this graph, *X-axis labels* are indices
+of individual test suites executed by csit-vpp-perf-1704-all job that
+created result output file used as data source for the graph, *Y-axis
+labels* are measured packet Latency [uSec] values, and the *Graph
+legend* lists the plotted test suites and their indices. Latency is
+reported for concurrent symmetric bi-directional flows, separately for
+each direction: i) West-to-East: TGint1-to-SUT1-to-SUT2-to-TGint2, and
+ii) East-to-West: TGint2-to-SUT2-to-SUT1-to-TGint1.

 .. note::

-    Data sources for reported test results: i) FD.io test executor jobs
+    Test results have been generated by FD.io test executor jobs
     `csit-vpp-perf-1704-all
-    `_ ,
-    ii) archived FD.io jobs test result `output files
-    <../../_static/archive/>`_.
+    `_,
+    with Robot Framework result files csit-vpp-perf-1704-all-.zip
+    `archived here <../../_static/archive/>`_

-VPP packet latency - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below.
+VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below.

 .. raw:: html

@@ -30,12 +38,12 @@ physical core (1c)** - is presented in the figure below.

 *Figure 1. VPP 1thread 1core - packet latency for Phy-to-VM-to-Phy VM vhost-user.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

 .. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
     $ grep -E "64B-1t1c-.*vhost.*-ndrdisc" *

     10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
@@ -53,8 +61,7 @@ git repository by filtering with specified regex as follows:
     40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc
     40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc

-VPP packet latency - running in configuration of **two worker threads (2t) on two
-physical cores (2c)** - is presented in the figure below.
+VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below.

 .. raw:: html

@@ -62,12 +69,12 @@ physical cores (2c)** - is presented in the figure below.

 *Figure 2. 
VPP 2threads 2cores - packet latency for Phy-to-VM-to-Phy VM vhost-user.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-.*vhost.*-ndrdisc" * 10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst index 6f56cae134..030f4ddafe 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipsec.rst @@ -1,25 +1,35 @@ -Crypto in hardware: IP4FWD, IP6FWD -================================== - -Following sections provide a summary of VPP Phy-to-Phy IPSEC HW -performance illustrating NDR throughput (zero packet loss) and PDR throughput -(<0.5% packet loss). Performance is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph -legend* identifes the plotted test suites. +IPSec Crypto HW: IP4 Routed-Forwarding +====================================== + +Following sections include summary graphs of VPP Phy-to-Phy performance with +IPSec encryption used in combination with IPv4 routed-forwarding, +including NDR throughput (zero packet loss) and PDR throughput (<0.5% +packet loss). VPP IPSec encryption is accelerated using DPDK cryptodev +library driving Intel Quick Assist (QAT) crypto PCIe hardware cards. +Performance is reported for VPP running in multiple configurations of +VPP worker thread(s), a.k.a. VPP data plane thread(s), and their +physical CPU core(s) placement. + +Results are generated by multiple executions of the same CSIT tests. +In order to display variation in measured throughput values, Box-and- +whisker plots are used to show their quartiles (Min, 1st quartile / 25th +percentile, 2nd quartile / 50th percentile / mean, 3rd quartile / 75th +percentile, Max). Outliers are plotted as individual points. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all jobs that +created result output files used as data sources for the graph, *Y-axis +labels* are measured Packets Per Second [pps] values, and the *Graph +legend* lists the plotted test suites and their indices. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ @@ -33,13 +43,13 @@ one physical core (1c)** - is presented in the figure below. *Figure 1. 
VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPSEC HW.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf - $ grep -E "64B-1t1c-.*ipsec*" * + $ cd $CSIT/tests/perf + $ grep -E "64B-1t1c-.*ipsec.*-ndrdisc" * 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc @@ -51,6 +61,7 @@ git repository by filtering with specified regex as follows: 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-ndrdisc 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrdisc + VPP NDR Throughput - running in configuration of **two worker threads (2t) on two physical cores (2c)** - is presented in the figure below. @@ -60,13 +71,13 @@ two physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPSEC HW.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf - $ grep -E "64B-2t2c-.*ipsec*" * + $ cd $CSIT/tests/perf + $ grep -E "64B-2t2c-.*ipsec.*-ndrdisc" * 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc13-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-ndrdisc 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc15-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-ndrdisc @@ -91,23 +102,23 @@ packet loss ratio. *Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPSEC HW.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf - $ grep -E "64B-1t1c-.*ipsec*" * + $ cd $CSIT/tests/perf + $ grep -E "64B-1t1c-.*ipsec.*-pdrdisc" * - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc03-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc VPP PDR Throughput - running in configuration of **two worker threads (2t) on @@ -119,21 +130,21 @@ two physical cores (2c)** - is presented in the figure below. *Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPSEC HW.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf - $ grep -E "64B-1t1c-.*ipsec*" * - - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc04-64B-1t1c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc - 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc + $ cd $CSIT/tests/perf + $ grep -E "64B-2t2c-.*ipsec.*-pdrdisc" * + + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-aes-gcm-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-interfaces-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-interfaces-cbc-sha1-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-interfaces-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-aes-gcm-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-aes-gcm-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc14-64B-2t2c-ethip4ipsecscale1ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsecscaleip4-ip4base-tunnels-cbc-sha1-ndrpdrdisc.robot:| tc16-64B-2t2c-ethip4ipsecscale1000ip4-ip4base-tunnels-cbc-sha1-pdrdisc + 40ge2p1xl710-ethip4ipsectptlispgpe-ip4base-cbc-sha1-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4ipsectptlispgpe-ip4base-cbc-sha1-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst index eab9b061bc..41c0a1cb91 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4.rst @@ -1,31 +1,38 @@ IPv4 Routed-Forwarding ====================== -Following sections provide a summary of VPP Phy-to-Phy IPv4 Routed-Forwarding -performance illustrating NDR throughput (zero packet loss) and PDR throughput -(<0.5% packet loss). Performance is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. 
VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph -legend* identifes the plotted test suites. +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv4 Routed-Forwarding, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +Results are generated by multiple executions of the same CSIT tests. +In order to display variation in measured throughput values, Box-and- +whisker plots are used to show their quartiles (Min, 1st quartile / 25th +percentile, 2nd quartile / 50th percentile / mean, 3rd quartile / 75th +percentile, Max). Outliers are plotted as individual points. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all jobs that +created result output files used as data sources for the graph, *Y-axis +labels* are measured Packets Per Second [pps] values, and the *Graph +legend* lists the plotted test suites and their indices. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ -VPP NDR Throughput - running in configuration of **one worker thread (1t) on -one physical core (1c)** - is presented in the figure below. +VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. .. raw:: html @@ -33,25 +40,25 @@ one physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-ndrdisc -VPP NDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. .. raw:: html @@ -60,29 +67,28 @@ two physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc - 40ge2p1xl710-ethip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-iacldstbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ipolicemarkbase-ndrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale200k-ndrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale20k-ndrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4scale2m-ndrdisc + 40ge2p1xl710-ethip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-ndrdisc PDR Throughput ~~~~~~~~~~~~~~ -VPP PDR Throughput - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. PDR at below 0.5% -packet loss ratio. +VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -91,24 +97,24 @@ packet loss ratio. *Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" * - 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc - 10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-pdrdisc - 10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale200k-pdrdisc - 10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale20k-pdrdisc - 10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale2m-pdrdisc + 10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-iacldstbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-ipolicemarkbase-pdrdisc + 10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-pdrdisc + 10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale200k-pdrdisc + 10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale20k-pdrdisc + 10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4scale2m-pdrdisc -VPP PDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -117,19 +123,19 @@ two physical cores (2c)** - is presented in the figure below. *Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
     $ grep -E "64B-2t2c-ethip4-ip4[a-z0-9]+-[a-z-]*pdrdisc" *

-    10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-copwhtlistbase-pdrdisc
-    10ge2p1x520-ethip4-ip4base-iacldstbase-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-iacldstbase-pdrdisc
-    10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-ipolicemarkbase-pdrdisc
-    10ge2p1x520-ethip4-ip4base-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-pdrdisc
-    10ge2p1x520-ethip4-ip4scale200k-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale200k-pdrdisc
-    10ge2p1x520-ethip4-ip4scale20k-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale20k-pdrdisc
-    10ge2p1x520-ethip4-ip4scale2m-ndrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale2m-pdrdisc
+    10ge2p1x520-ethip4-ip4base-copwhtlistbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-copwhtlistbase-pdrdisc
+    10ge2p1x520-ethip4-ip4base-iacldstbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-iacldstbase-pdrdisc
+    10ge2p1x520-ethip4-ip4base-ipolicemarkbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-ipolicemarkbase-pdrdisc
+    10ge2p1x520-ethip4-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-pdrdisc
+    10ge2p1x520-ethip4-ip4scale200k-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale200k-pdrdisc
+    10ge2p1x520-ethip4-ip4scale20k-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale20k-pdrdisc
+    10ge2p1x520-ethip4-ip4scale2m-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4scale2m-pdrdisc

diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst
index 1b300ca34a..f99c0a502e 100644
--- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv4_tunnels.rst
@@ -1,31 +1,38 @@
 IPv4 Overlay Tunnels
 ====================

-Following sections provide a summary of VPP Phy-to-Phy IPv4 Overlay Tunnels
-performance illustrating NDR throughput (zero packet loss) and PDR throughput
-(<0.5% packet loss). Performance is reported for VPP running in multiple
-configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and
-their physical CPU core(s) placement.
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph
-legend* identifes the plotted test suites.
+Following sections include summary graphs of VPP Phy-to-Phy performance
+with IPv4 Overlay Tunnels, including NDR throughput (zero packet loss)
+and PDR throughput (<0.5% packet loss). Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
+
+Results are generated by multiple executions of the same CSIT tests.
+In order to display variation in measured throughput values, Box-and-
+whisker plots are used to show their quartiles (Min, 1st quartile / 25th
+percentile, 2nd quartile / 50th percentile / median, 3rd quartile / 75th
+percentile, Max). Outliers are plotted as individual points. 
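+
+As an illustration of how these plotted statistics relate to the raw
+data, the following minimal sketch computes nearest-rank approximations
+of the same five values from a hypothetical file holding one discovered
+throughput result [pps] per line; the file name and layout are
+illustrative assumptions, not CSIT artifacts, and the plotting tool may
+interpolate quartiles differently:
+
+.. code-block:: bash
+
+    $ # one throughput sample per line, one line per repeated execution;
+    $ # assumes at least four samples are present
+    $ sort -n ndr_samples.txt | awk '
+        { v[NR] = $1 }
+        END {
+            print "Min    :", v[1]
+            print "Q1/25th:", v[int((NR + 1) / 4)]
+            print "Median :", v[int((NR + 1) / 2)]
+            print "Q3/75th:", v[int(3 * (NR + 1) / 4)]
+            print "Max    :", v[NR]
+        }'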
+ +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all jobs that +created result output files used as data sources for the graph, *Y-axis +labels* are measured Packets Per Second [pps] values, and the *Graph +legend* lists the plotted test suites and their indices. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ -VPP NDR Throughput - running in configuration of **one worker thread (1t) on -one physical core (1c)** - is presented in the figure below. +VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. .. raw:: html @@ -34,21 +41,21 @@ one physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2xcbase-ndrdisc -VPP NDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. .. raw:: html @@ -56,25 +63,24 @@ two physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc" * - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip4-ip4base-ndrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4lispip6-ip4base-ndrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-ndrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2xcbase-ndrdisc PDR Throughput ~~~~~~~~~~~~~~ -VPP PDR Throughput - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. PDR at below 0.5% -packet loss ratio. +VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -83,21 +89,21 @@ packet loss ratio. *Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-1t1c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc02-64B-1t1c-ethip4lispip4-ip4base-pdrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc02-64B-1t1c-ethip4lispip6-ip4base-pdrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-pdrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip4-ip4base-pdrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4lispip6-ip4base-pdrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2xcbase-pdrdisc -VPP PDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -105,16 +111,16 @@ two physical cores (2c)** - is presented in the figure below. *Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "64B-2t2c-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc" * - 10ge2p1x520-ethip4lispip4-ip4base-ndrdisc.robot:| tc08-64B-2t2c-ethip4lispip4-ip4base-pdrdisc - 10ge2p1x520-ethip4lispip6-ip4base-ndrdisc.robot:| tc08-64B-2t2c-ethip4lispip6-ip4base-pdrdisc - 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-pdrdisc - 10ge2p1x520-ethip4vxlan-l2xcbase-ndrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2xcbase-pdrdisc + 10ge2p1x520-ethip4lispip4-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip4-ip4base-pdrdisc + 10ge2p1x520-ethip4lispip6-ip4base-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4lispip6-ip4base-pdrdisc + 10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-pdrdisc + 10ge2p1x520-ethip4vxlan-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2xcbase-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst index a84423aa9c..c475ab8f18 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6.rst @@ -1,31 +1,38 @@ IPv6 Routed-Forwarding ====================== -Following sections provide a summary of VPP Phy-to-Phy IPv6 Routed-Forwarding -performance illustrating NDR throughput (zero packet loss) and PDR throughput -(<0.5% packet loss). Performance is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph -legend* identifes the plotted test suites. +Following sections include summary graphs of VPP Phy-to-Phy performance +with IPv6 Routed-Forwarding, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +Results are generated by multiple executions of the same CSIT tests. +In order to display variation in measured throughput values, Box-and- +whisker plots are used to show their quartiles (Min, 1st quartile / 25th +percentile, 2nd quartile / 50th percentile / mean, 3rd quartile / 75th +percentile, Max). Outliers are plotted as individual points. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all jobs that +created result output files used as data sources for the graph, *Y-axis +labels* are measured Packets Per Second [pps] values, and the *Graph +legend* lists the plotted test suites and their indices. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. 
+ `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ -VPP NDR Throughput - running in configuration of **one worker thread (1t) on -one physical core (1c)** - is presented in the figure below. +VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. .. raw:: html @@ -34,24 +41,24 @@ one physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc01-78B-1t1c-ethip6-ip6base-ndrdisc -VPP NDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. .. raw:: html @@ -60,28 +67,27 @@ two physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: + .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*ndrdisc" * - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ipolicemarkbase-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ipolicemarkbase-ndrdisc - 10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc - 40ge2p1xl710-ethip6-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-copwhtlistbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-iacldstbase-ndrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale200k-ndrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale20k-ndrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6scale2m-ndrdisc + 40ge2p1xl710-ethip6-ip6base-ndrpdrdisc.robot:| tc07-78B-2t2c-ethip6-ip6base-ndrdisc PDR Throughput ~~~~~~~~~~~~~~ -VPP PDR Throughput - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. PDR at below 0.5% -packet loss ratio. +VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -90,23 +96,23 @@ packet loss ratio. *Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" * - 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc - 10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-pdrdisc - 10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale200k-pdrdisc - 10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale20k-pdrdisc - 10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale2m-pdrdisc + 10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-copwhtlistbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-iacldstbase-pdrdisc + 10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6base-pdrdisc + 10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale200k-pdrdisc + 10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale20k-pdrdisc + 10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc02-78B-1t1c-ethip6-ip6scale2m-pdrdisc -VPP PDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -115,18 +121,18 @@ two physical cores (2c)** - is presented in the figure below. *Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. 
code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
     $ grep -E "78B-2t2c-ethip6-ip6[a-z0-9]+-[a-z-]*pdrdisc" *

-    10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-copwhtlistbase-pdrdisc
-    10ge2p1x520-ethip6-ip6base-iacldstbase-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-iacldstbase-pdrdisc
-    10ge2p1x520-ethip6-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-pdrdisc
-    10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale200k-pdrdisc
-    10ge2p1x520-ethip6-ip6scale20k-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale20k-pdrdisc
-    10ge2p1x520-ethip6-ip6scale2m-ndrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale2m-pdrdisc
+    10ge2p1x520-ethip6-ip6base-copwhtlistbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-copwhtlistbase-pdrdisc
+    10ge2p1x520-ethip6-ip6base-iacldstbase-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-iacldstbase-pdrdisc
+    10ge2p1x520-ethip6-ip6base-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6base-pdrdisc
+    10ge2p1x520-ethip6-ip6scale200k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale200k-pdrdisc
+    10ge2p1x520-ethip6-ip6scale20k-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale20k-pdrdisc
+    10ge2p1x520-ethip6-ip6scale2m-ndrpdrdisc.robot:| tc08-78B-2t2c-ethip6-ip6scale2m-pdrdisc

diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst
index 8619ce661b..79e77445dd 100644
--- a/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/ipv6_tunnels.rst
@@ -1,31 +1,38 @@
 IPv6 Overlay Tunnels
 ====================

-Following sections provide a summary of VPP Phy-to-Phy IPv6 Overlay Tunnels
-performance illustrating NDR throughput (zero packet loss) and PDR throughput
-(<0.5% packet loss). Performance is reported for VPP running in multiple
-configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and
-their physical CPU core(s) placement.
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph
-legend* identifes the plotted test suites.
+Following sections include summary graphs of VPP Phy-to-Phy performance
+with IPv6 Overlay Tunnels, including NDR throughput (zero packet loss)
+and PDR throughput (<0.5% packet loss). Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
+
+Results are generated by multiple executions of the same CSIT tests.
+In order to display variation in measured throughput values, Box-and-
+whisker plots are used to show their quartiles (Min, 1st quartile / 25th
+percentile, 2nd quartile / 50th percentile / median, 3rd quartile / 75th
+percentile, Max). Outliers are plotted as individual points.
+
+*Title of each graph* is a regex (regular expression) matching all
+throughput test cases plotted on this graph, *X-axis labels* are indices
+of individual test suites executed by csit-vpp-perf-1704-all jobs that
+created result output files used as data sources for the graph, *Y-axis
+labels* are measured Packets Per Second [pps] values, and the *Graph
+legend* lists the plotted test suites and their indices.

 .. 
note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ -VPP NDR Throughput - running in configuration of **one worker thread (1t) on -one physical core (1c)** - is presented in the figure below. +VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. .. raw:: html @@ -34,19 +41,19 @@ one physical core (1c)** - is presented in the figure below. *Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip4-ip6base-ndrdisc 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc01-78B-1t1c-ethip6lispip6-ip6base-ndrdisc -VPP NDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. +VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. .. raw:: html @@ -55,12 +62,12 @@ two physical cores (2c)** - is presented in the figure below. *Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc07-78B-2t2c-ethip6lispip4-ip6base-ndrdisc @@ -69,9 +76,8 @@ git repository by filtering with specified regex as follows: PDR Throughput ~~~~~~~~~~~~~~ -VPP PDR Throughput - running in configuration of **one worker thread (1t) on one -physical core (1c)** - is presented in the figure below. PDR at below 0.5% -packet loss ratio. +VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -80,19 +86,19 @@ packet loss ratio. *Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip4-ip6base-pdrdisc 10ge2p1x520-ethip6lispip6-ip6base-ndrdisc.robot:| tc02-78B-1t1c-ethip6lispip6-ip6base-pdrdisc -VPP PDR Throughput - running in configuration of **two worker threads (2t) on -two physical cores (2c)** - is presented in the figure below. 
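+
+The PDR acceptance criterion applied in these tests (packet loss ratio
+not exceeding 0.5%) can be illustrated with a minimal sketch; the
+aggregate TX/RX packet counts below are hypothetical values, not CSIT
+outputs:
+
+.. code-block:: bash
+
+    $ # packets sent and received in one hypothetical measurement trial
+    $ tx=10000000; rx=9960000
+    $ awk -v tx="$tx" -v rx="$rx" 'BEGIN {
+          loss = 100.0 * (tx - rx) / tx
+          printf "loss = %.3f%% -> %s\n", loss,
+                 ((loss <= 0.5) ? "within PDR" : "PDR exceeded")
+      }'
+    loss = 0.400% -> within PDR
+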
+VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented +in the graph below. PDR measured for 0.5% packet loss ratio. .. raw:: html @@ -101,12 +107,12 @@ two physical cores (2c)** - is presented in the figure below. *Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.* -CSIT test cases used to generate results presented above can be found in CSIT -git repository by filtering with specified regex as follows: +CSIT source code for the test cases used for above plots can be found in CSIT +git repository: .. code-block:: bash - $ csit/tests/perf + $ cd $CSIT/tests/perf $ grep -E "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc" * 10ge2p1x520-ethip6lispip4-ip6base-ndrdisc.robot:| tc08-78B-2t2c-ethip6lispip4-ip6base-pdrdisc diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst index 3e1d645389..d9a69015e4 100644 --- a/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst +++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/l2.rst @@ -1,31 +1,38 @@ L2 Ethernet Switching ===================== -Following sections provide a summary of VPP Phy-to-Phy L2 Ethernet switching -performance illustrating NDR throughput (zero packet loss) and PDR throughput -(<0.5% packet loss). Performance is reported for VPP running in multiple -configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and -their physical CPU core(s) placement. - -*Title of each graph* is a regex (regular expression) matching all plotted -throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs -that created result output files used as data sources for the graph, -*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph -legend* identifes the plotted test suites. +Following sections include summary graphs of VPP Phy-to-Phy performance +with L2 Ethernet switching, including NDR throughput (zero packet loss) +and PDR throughput (<0.5% packet loss). Performance is reported for VPP +running in multiple configurations of VPP worker thread(s), a.k.a. VPP +data plane thread(s), and their physical CPU core(s) placement. + +Results are generated by multiple executions of the same CSIT tests. +In order to display variation in measured throughput values, Box-and- +whisker plots are used to show their quartiles (Min, 1st quartile / 25th +percentile, 2nd quartile / 50th percentile / mean, 3rd quartile / 75th +percentile, Max). Outliers are plotted as individual points. + +*Title of each graph* is a regex (regular expression) matching all +throughput test cases plotted on this graph, *X-axis labels* are indices +of individual test suites executed by csit-vpp-perf-1704-all jobs that +created result output files used as data sources for the graph, *Y-axis +labels* are measured Packets Per Second [pps] values, and the *Graph +legend* lists the plotted test suites and their indices. .. note:: - Data sources for reported test results: i) FD.io test executor jobs + Test results have been generated by FD.io test executor jobs `csit-vpp-perf-1704-all - `_ , - ii) archived FD.io jobs test result `output files - <../../_static/archive/>`_. + `_, + with Robot Framework result files csit-vpp-perf-1704-all-.zip + `archived here <../../_static/archive/>`_ NDR Throughput ~~~~~~~~~~~~~~ -VPP NDR Throughput - running in configuration of **one worker thread (1t) on -one physical core (1c)** - is presented in the figure below. 
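+
+The grep filters used throughout these sections can also be generalized.
+A sketch, assuming GNU grep, that lists just the matching NDR test case
+names for a chosen frame size and thread-core combination; the regex is
+an illustrative assumption following the CSIT test case naming pattern:
+
+.. code-block:: bash
+
+    $ cd $CSIT/tests/perf
+    $ # -h drops file names, -o prints only the matched test case names
+    $ grep -Eho "tc[0-9]+-64B-1t1c-[a-z0-9-]+-ndrdisc" *.robot | sort -u
+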

NDR Throughput
~~~~~~~~~~~~~~

-VPP NDR Throughput - running in configuration of **one worker thread (1t) on
-one physical core (1c)** - is presented in the figure below.
+VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below.

.. raw:: html

@@ -34,26 +41,26 @@ one physical core (1c)** - is presented in the figure below.

*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy L2 Ethernet
Switching.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" *

-    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
-    10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-dot1ad-l2xcbase-ndrdisc
-    10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-dot1q-l2xcbase-ndrdisc
-    10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
-    10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc
-    10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1xl710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1xl710-eth-l2xcbase-ndrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc
+    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
+    10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1ad-l2xcbase-ndrdisc
+    10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2xcbase-ndrdisc
+    10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
+    10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc
+    10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1xl710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1xl710-eth-l2xcbase-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-ndrdisc
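
The same regex can also be used to see how the matching suites spread across
NIC models: the leading token of each suite file name encodes port count and
NIC (e.g. 10ge2p1x520 reads as two 10GE ports on one Intel X520). An
illustrative one-liner, assuming the working directory is $CSIT/tests/perf:

.. code-block:: bash

    $ # Count matching suite files per NIC prefix (illustrative only).
    $ grep -lE "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" * \
        | cut -d- -f1 | sort | uniq -c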

-VPP NDR Throughput - running in configuration of **two worker threads (2t) on two
-physical cores (2c)** - is presented in the figure below.
+VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below.

.. raw:: html

@@ -62,30 +69,29 @@ physical cores (2c)** - is presented in the figure below.

*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy L2 Ethernet
Switching.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc" *

-    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
-    10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-dot1ad-l2xcbase-ndrdisc
-    10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-dot1q-l2xcbase-ndrdisc
-    10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
-    10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc
-    10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1xl710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
-    40ge2p1xl710-eth-l2xcbase-ndrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc
+    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
+    10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1ad-l2xcbase-ndrdisc
+    10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2xcbase-ndrdisc
+    10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
+    10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc
+    10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1xl710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-ndrdisc
+    40ge2p1xl710-eth-l2xcbase-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-ndrdisc

PDR Throughput
~~~~~~~~~~~~~~

-VPP PDR Throughput - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below. PDR at below 0.5% packet
-loss ratio.
+VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.

.. raw:: html

@@ -94,24 +100,24 @@ loss ratio.

*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy L2 Ethernet
Switching.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" *

-    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
-    10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc02-64B-1t1c-dot1ad-l2xcbase-pdrdisc
-    10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc02-64B-1t1c-dot1q-l2xcbase-pdrdisc
-    10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
-    10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-pdrdisc
-    10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
-    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1ad-l2xcbase-pdrdisc
+    10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1q-l2xcbase-pdrdisc
+    10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-pdrdisc
+    10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
+    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-pdrdisc
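
PDR in these tests tolerates up to 0.5% packet loss. To make the budget
concrete, a minimal bash sketch with an assumed offered load, not a figure
taken from the report:

.. code-block:: bash

    $ # Loss budget at 0.5% PLR for an assumed 10,000,000 offered packets:
    $ echo $((10000000 * 5 / 1000))
    50000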

-VPP PDR Throughput - running in configuration of **two worker threads (2t) on
-two physical cores (2c)** - is presented in the figure below.
+VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.

.. raw:: html

@@ -120,19 +126,19 @@ two physical cores (2c)** - is presented in the figure below.

*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-Phy L2 Ethernet
Switching.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc" *

-    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
-    10ge2p1x520-dot1ad-l2xcbase-ndrdisc.robot:| tc08-64B-2t2c-dot1ad-l2xcbase-pdrdisc
-    10ge2p1x520-dot1q-l2xcbase-ndrdisc.robot:| tc08-64B-2t2c-dot1q-l2xcbase-pdrdisc
-    10ge2p1x520-eth-l2bdbasemaclrn-ndrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
-    10ge2p1x520-eth-l2xcbase-ndrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-pdrdisc
-    10ge2p1x710-eth-l2bdbasemaclrn-ndrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
-    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1vic1227-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1x520-dot1ad-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1ad-l2xcbase-pdrdisc
+    10ge2p1x520-dot1q-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1q-l2xcbase-pdrdisc
+    10ge2p1x520-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
+    10ge2p1x520-eth-l2xcbase-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-pdrdisc
+    10ge2p1x710-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc
+    40ge2p1vic1385-eth-l2bdbasemaclrn-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-pdrdisc

diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
index ce2e12961c..e4c6af9f6b 100644
--- a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
@@ -1,31 +1,39 @@
VM vhost Connections
====================

-Following sections provide a summary of VPP Phy-to-VM-to-Phy VM vhost-user
-performance illustrating NDR throughput (zero packet loss) and PDR throughput
-(<0.5% packet loss). Performance is reported for VPP running in multiple
-configurations of VPP worker thread(s), a.k.a. VPP data plane thread (s), and
-their physical CPU core(s) placement.
-
-*Title of each graph* is a regex (regular expression) matching all plotted
-throughput test cases, *X-axis labels* are indeces of csit-vpp-perf-1704 jobs
-that created result output files used as data sources for the graph,
-*Y-axis labels* are measured Packets Per Second [pps] values, and the *graph
-legend* identifes the plotted test suites.
+Following sections include summary graphs of VPP Phy-to-VM(s)-to-Phy
+performance with VM virtio and VPP vhost-user virtual interfaces,
+including NDR throughput (zero packet loss) and PDR throughput (<0.5%
+packet loss). Performance is reported for VPP running in multiple
+configurations of VPP worker thread(s), a.k.a. VPP data plane thread(s),
+and their physical CPU core(s) placement.
+
+Results are generated by multiple executions of the same CSIT tests.
+In order to display variation in measured throughput values,
+Box-and-whisker plots are used to show their quartiles (Min, 1st
+quartile / 25th percentile, 2nd quartile / 50th percentile / median,
+3rd quartile / 75th percentile, Max). Outliers are plotted as
+individual points.
+
+*Title of each graph* is a regex (regular expression) matching all
+throughput test cases plotted on this graph, *X-axis labels* are indices
+of individual test suites executed by csit-vpp-perf-1704-all jobs that
+created result output files used as data sources for the graph, *Y-axis
+labels* are measured Packets Per Second [pps] values, and the *Graph
+legend* lists the plotted test suites and their indices.

.. note::

-    Data sources for reported test results: i) FD.io test executor jobs
+    Test results have been generated by FD.io test executor jobs
    `csit-vpp-perf-1704-all
-    <https://jenkins.fd.io/view/csit/job/csit-vpp-perf-1704-all/>`_ ,
-    ii) archived FD.io jobs test result `output files
-    <../../_static/archive/>`_.
+    <https://jenkins.fd.io/view/csit/job/csit-vpp-perf-1704-all/>`_,
+    with Robot Framework result files csit-vpp-perf-1704-all-<id>.zip
+    `archived here <../../_static/archive/>`_

NDR Throughput
~~~~~~~~~~~~~~

-VPP NDR Throughput - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below.
+VPP NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below.

.. raw:: html

@@ -34,31 +42,31 @@ physical core (1c)** - is presented in the figure below.

*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-VM-to-Phy VM
vhost-user.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-1t1c-.*vhost.*-ndrdisc" *

    10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
-    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc
-    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc
-    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc
    10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc
+    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc
+    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc
+    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
-    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-ip4base-eth-4vhost-2vm-ndrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc
    40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc
+    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc01-64B-1t1c-eth-ip4base-eth-4vhost-2vm-ndrdisc
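
The grep commands in these sections assume a local checkout of the CSIT
repository with its path exported as $CSIT. A possible setup sketch: the
gerrit URL is the public CSIT repository, while rls1704 is assumed to be
the matching release branch name:

.. code-block:: bash

    $ git clone https://gerrit.fd.io/r/csit
    $ cd csit
    $ git checkout rls1704     # assumed release branch name
    $ export CSIT=$(pwd)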

-VPP NDR Throughput - running in configuration of **two worker threads (2t) on
-two physical cores (2c)** - is presented in the figure below.
+VPP NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below.

.. raw:: html

@@ -67,35 +75,34 @@ two physical cores (2c)** - is presented in the figure below.

*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-VM-to-Phy VM
vhost-user.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-2t2c-.*vhost.*-ndrdisc" *

    10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
-    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc
-    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc
-    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc
    10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
    10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc
+    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-ndrdisc
+    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-ndrdisc
+    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
-    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-ip4base-eth-4vhost-2vm-ndrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc
    40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-ndrdisc
+    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc07-64B-2t2c-eth-ip4base-eth-4vhost-2vm-ndrdisc

PDR Throughput
~~~~~~~~~~~~~~

-VPP PDR Throughput - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below. PDR at below 0.5%
-packet loss ratio.
+VPP PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.

.. raw:: html

@@ -104,31 +111,31 @@ packet loss ratio.

*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-VM-to-Phy VM
vhost-user.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-1t1c-.*vhost.*-pdrdisc" *

    10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
-    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc
-    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc
-    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc
    10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc
+    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc
+    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc
+    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
-    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-ip4base-eth-4vhost-2vm-pdrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc
    40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc
+    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc02-64B-1t1c-eth-ip4base-eth-4vhost-2vm-pdrdisc

-VPP PDR Throughput - running in configuration of **two worker threads (2t) on
-two physical cores (2c)** - is presented in the figure below.
+VPP PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.

.. raw:: html

@@ -137,26 +144,26 @@ two physical cores (2c)** - is presented in the figure below.

*Figure 4. VPP 2thread 2core - PDR Throughput for Phy-to-VM-to-Phy VM
vhost-user.*

-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
+CSIT source code for the test cases used for above plots can be found in CSIT
+git repository:

.. code-block:: bash

-    $ csit/tests/perf
+    $ cd $CSIT/tests/perf
    $ grep -E "64B-2t2c-.*vhost.*-pdrdisc" *

    10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-dot1q-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
-    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc
-    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc
-    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc
    10ge2p1x520-eth-l2xcbase-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
    10ge2p1x520-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc
+    10ge2p1x520-ethip4-ip4base-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-2vhost-1vm-pdrdisc
+    10ge2p1x520-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4-ip4base-eth-4vhost-2vm-pdrdisc
+    10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    10ge2p1x710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
-    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-ip4base-eth-4vhost-2vm-pdrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-2vhost-1vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-2vhost-1vm-pdrdisc
    40ge2p1xl710-eth-l2bdbasemaclrn-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2bdbasemaclrn-eth-4vhost-2vm-pdrdisc
    40ge2p1xl710-eth-l2xcbase-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-l2xcbase-eth-4vhost-2vm-pdrdisc
+    40ge2p1xl710-ethip4-ip4base-eth-4vhost-2vm-ndrpdrdisc.robot:| tc08-64B-2t2c-eth-ip4base-eth-4vhost-2vm-pdrdisc
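
The archived Robot Framework result files referenced in the notes above can
also be pulled and inspected offline. An illustrative sketch: the archive
base URL is assumed to follow the published report layout, and ID is a
placeholder to be read off the report's _static/archive/ listing:

.. code-block:: bash

    $ # ID is a placeholder; substitute an actual csit-vpp-perf-1704-all-<id>.zip
    $ # file name from the archive listing.
    $ ID=123
    $ curl -O "https://docs.fd.io/csit/rls1704/report/_static/archive/csit-vpp-perf-1704-all-${ID}.zip"
    $ unzip -l "csit-vpp-perf-1704-all-${ID}.zip"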