author     Tibor Frank <tifrank@cisco.com>  2018-05-29 10:45:47 +0200
committer  Tibor Frank <tifrank@cisco.com>  2018-05-30 11:06:08 +0000
commit     e01470ec9038338409a494a2652eecabf4394578 (patch)
tree       9a2e19441e2456652901b0e6438fdf909668f6fa
parent     564c2ae4f2d3cc7a210f6fe17f55091afcc05d45 (diff)
CSIT-1105: Prepare and generate 18.01.2 report
Change-Id: Iebda4fd10701c27512b443c14b2aeef314003d58
Signed-off-by: Tibor Frank <tifrank@cisco.com>
-rw-r--r--  docs/report/detailed_test_results/index.rst | 5
-rw-r--r--  docs/report/detailed_test_results/vpp_functional_results/index.rst | 4
-rw-r--r--  docs/report/detailed_test_results/vpp_functional_results_centos/index.rst | 12
-rw-r--r--  docs/report/detailed_test_results/vpp_mrr_results/index.rst (renamed from docs/report/detailed_test_results/dpdk_performance_results/index.rst) | 8
-rw-r--r--  docs/report/detailed_test_results/vpp_unittest_results/index.rst | 7
-rw-r--r--  docs/report/detailed_test_results/vpp_unittest_results/vpp_unittest_results.rst | 1178
-rw-r--r--  docs/report/dpdk_performance_tests/csit_release_notes.rst | 23
-rw-r--r--  docs/report/dpdk_performance_tests/documentation.rst | 5
-rw-r--r--  docs/report/dpdk_performance_tests/index.rst | 12
-rw-r--r--  docs/report/dpdk_performance_tests/overview.rst | 240
-rw-r--r--  docs/report/dpdk_performance_tests/packet_latency_graphs/index.rst | 28
-rw-r--r--  docs/report/dpdk_performance_tests/packet_latency_graphs/ip4.rst | 73
-rw-r--r--  docs/report/dpdk_performance_tests/packet_latency_graphs/l2.rst | 74
-rw-r--r--  docs/report/dpdk_performance_tests/packet_throughput_graphs/index.rst | 39
-rw-r--r--  docs/report/dpdk_performance_tests/packet_throughput_graphs/ip4.rst | 150
-rw-r--r--  docs/report/dpdk_performance_tests/packet_throughput_graphs/l2.rst | 150
-rw-r--r--  docs/report/dpdk_performance_tests/test_environment.rst | 68
-rw-r--r--  docs/report/honeycomb_performance_tests/csit_release_notes.rst | 20
-rw-r--r--  docs/report/honeycomb_performance_tests/documentation.rst | 5
-rw-r--r--  docs/report/honeycomb_performance_tests/index.rst | 11
-rw-r--r--  docs/report/honeycomb_performance_tests/overview.rst | 122
-rw-r--r--  docs/report/honeycomb_performance_tests/test_environment.rst | 22
-rw-r--r--  docs/report/honeycomb_performance_tests/test_result_data.rst | 19
-rw-r--r--  docs/report/index.rst | 9
-rw-r--r--  docs/report/introduction/general_notes.rst | 14
-rw-r--r--  docs/report/introduction/overview.rst | 23
-rw-r--r--  docs/report/test_configuration/index.rst | 2
-rw-r--r--  docs/report/test_configuration/vpp_functional_configuration/index.rst | 4
-rw-r--r--  docs/report/test_configuration/vpp_functional_configuration_centos/index.rst | 12
-rw-r--r--  docs/report/test_configuration/vpp_mrr_configuration/index.rst | 12
-rw-r--r--  docs/report/vpp_performance_tests/impact_meltdown/index.rst | 164
-rw-r--r--  docs/report/vpp_performance_tests/impact_spectreandmeltdown/index.rst | 167
-rw-r--r--  docs/report/vpp_performance_tests/packet_latency_graphs/index.rst | 1
-rw-r--r--  docs/report/vpp_performance_tests/packet_latency_graphs/srv6.rst | 48
-rw-r--r--  docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst | 1
-rw-r--r--  docs/report/vpp_performance_tests/packet_throughput_graphs/srv6.rst | 99
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_memif.rst | 66
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_orchestrated.rst | 110
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/index.rst | 9
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4.rst | 54
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4_tunnels.rst | 66
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6.rst | 54
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6_tunnels.rst | 66
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/ipsec.rst | 71
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/l2.rst | 51
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/srv6.rst | 68
-rw-r--r--  docs/report/vpp_performance_tests/throughput_speedup_multi_core/vm_vhost.rst | 229
-rw-r--r--  docs/report/vpp_unit_tests/documentation.rst | 6
-rw-r--r--  docs/report/vpp_unit_tests/index.rst | 8
-rw-r--r--  docs/report/vpp_unit_tests/overview.rst | 87
-rw-r--r--  resources/tools/presentation/environment.py | 39
-rw-r--r--  resources/tools/presentation/generator_CPTA.py | 498
-rw-r--r--  resources/tools/presentation/generator_files.py | 4
-rw-r--r--  resources/tools/presentation/generator_plots.py | 8
-rw-r--r--  resources/tools/presentation/generator_report.py | 4
-rw-r--r--  resources/tools/presentation/generator_tables.py | 526
-rw-r--r--  resources/tools/presentation/input_data_files.py | 394
-rw-r--r--  resources/tools/presentation/input_data_parser.py | 205
-rw-r--r--  resources/tools/presentation/pal.py | 27
-rw-r--r--  resources/tools/presentation/specification.yaml | 1880
-rw-r--r--  resources/tools/presentation/specification_parser.py | 60
-rw-r--r--  resources/tools/presentation/utils.py | 161
62 files changed, 2964 insertions, 4618 deletions
diff --git a/docs/report/detailed_test_results/index.rst b/docs/report/detailed_test_results/index.rst
index d980a3532b..923f6c6f76 100644
--- a/docs/report/detailed_test_results/index.rst
+++ b/docs/report/detailed_test_results/index.rst
@@ -7,10 +7,9 @@
.. toctree::
vpp_performance_results/index
+ vpp_mrr_results/index
cot_performance_results/index
- dpdk_performance_results/index
vpp_functional_results/index
+ vpp_functional_results_centos/index
honeycomb_functional_results/index
nshsfc_functional_results/index
- vpp_unittest_results/index
- vpp_http_server_perf_results/index
diff --git a/docs/report/detailed_test_results/vpp_functional_results/index.rst b/docs/report/detailed_test_results/vpp_functional_results/index.rst
index 4d7060bb54..5b82c489c4 100644
--- a/docs/report/detailed_test_results/vpp_functional_results/index.rst
+++ b/docs/report/detailed_test_results/vpp_functional_results/index.rst
@@ -1,5 +1,5 @@
-VPP Functional Results
-======================
+VPP Functional Results - Ubuntu
+===============================
.. note::
diff --git a/docs/report/detailed_test_results/vpp_functional_results_centos/index.rst b/docs/report/detailed_test_results/vpp_functional_results_centos/index.rst
new file mode 100644
index 0000000000..e65b68b250
--- /dev/null
+++ b/docs/report/detailed_test_results/vpp_functional_results_centos/index.rst
@@ -0,0 +1,12 @@
+VPP Functional Results - CentOS
+===============================
+
+.. note::
+
+ Data sources for reported test results: i) `FD.io test executor vpp
+ functional jobs`_, ii) archived FD.io jobs test result `output files
+ <../../_static/archive/>`_.
+
+.. toctree::
+
+ vpp_functional_results_centos
diff --git a/docs/report/detailed_test_results/dpdk_performance_results/index.rst b/docs/report/detailed_test_results/vpp_mrr_results/index.rst
index e6466d2e60..ce8262ba7c 100644
--- a/docs/report/detailed_test_results/dpdk_performance_results/index.rst
+++ b/docs/report/detailed_test_results/vpp_mrr_results/index.rst
@@ -1,12 +1,12 @@
-DPDK Performance Results
-===========================
+VPP MRR Results
+===============
.. note::
- Data sources for reported test results: i) `FD.io test executor dpdk
+ Data sources for reported test results: i) `FD.io test executor vpp
performance jobs`_, ii) archived FD.io jobs test result `output files
<../../_static/archive/>`_.
.. toctree::
- dpdk_performance_results
+ vpp_mrr_results
diff --git a/docs/report/detailed_test_results/vpp_unittest_results/index.rst b/docs/report/detailed_test_results/vpp_unittest_results/index.rst
deleted file mode 100644
index f77153d6b9..0000000000
--- a/docs/report/detailed_test_results/vpp_unittest_results/index.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-VPP Unit Test Results
-=====================
-
-.. toctree::
-
- vpp_unittest_results
-
diff --git a/docs/report/detailed_test_results/vpp_unittest_results/vpp_unittest_results.rst b/docs/report/detailed_test_results/vpp_unittest_results/vpp_unittest_results.rst
deleted file mode 100644
index 3e1fd5ac57..0000000000
--- a/docs/report/detailed_test_results/vpp_unittest_results/vpp_unittest_results.rst
+++ /dev/null
@@ -1,1178 +0,0 @@
-ACL Security Groups
-```````````````````
-::
-
- ==============================================================================
- ACL plugin Test Case
- ==============================================================================
- ACL plugin version check; learn MACs OK
- ACL create/delete test OK
- permit ACL apply test OK
- deny ACL apply test OK
- VPP_624 permit ICMPv4 OK
- VPP_624 permit ICMPv6 OK
- VPP_624 deny ICMPv4 OK
- VPP_624 deny ICMPv6 OK
- permit TCPv4 OK
- permit TCPv6 OK
- permit UDPv4 OK
- permit UDPv6 OK
- deny TCPv4/v6 OK
- deny UDPv4/v6 OK
- verify add/dump acls OK
- permit single TCPv4 OK
- permit single UDPv4 OK
- permit single TCPv6 OK
- permit single UPPv6 OK
- deny single TCPv4/v6 OK
- deny single UDPv4/v6 OK
- deny single UDPv4/v6, permit ip any, verify non-initial fragment blocked OK
- VPP-687 zero length udp ipv4 packet OK
- VPP-687 zero length udp ipv6 packet OK
- permit TCPv4 + non-match range OK
- permit TCPv6 + non-match range OK
- permit UDPv4 + non-match range OK
- permit UDPv6 + non-match range OK
- deny TCPv4/v6 + non-match range OK
- deny UDPv4/v6 + non-match range OK
-
- ==============================================================================
- IRB Test Case
- ==============================================================================
- ACL plugin prepare OK
- ACL IPv6 routed -> bridged, L2 ACL deny OK
- ACL IPv6 routed -> bridged, L3 ACL deny OK
- ACL IPv4 routed -> bridged, L2 ACL deny OK
- ACL IPv4 routed -> bridged, L3 ACL deny OK
- ACL IPv6 bridged -> routed, L2 ACL deny OK
- ACL IPv6 bridged -> routed, L3 ACL deny OK
- ACL IPv4 bridged -> routed, L2 ACL deny OK
- ACL IPv4 bridged -> routed, L3 ACL deny OK
- ACL IPv6 routed -> bridged, L2 ACL permit+reflect OK
- ACL IPv6 bridged -> routed, L2 ACL permit+reflect OK
- ACL IPv4 routed -> bridged, L2 ACL permit+reflect OK
- ACL IPv4 bridged -> routed, L2 ACL permit+reflect OK
- ACL IPv6 routed -> bridged, L3 ACL permit+reflect OK
- ACL IPv6 bridged -> routed, L3 ACL permit+reflect OK
- ACL IPv4 routed -> bridged, L3 ACL permit+reflect OK
- ACL IPv4 bridged -> routed, L3 ACL permit+reflect OK
- ACL IPv6+EH routed -> bridged, L2 ACL deny OK
- ACL IPv6+EH routed -> bridged, L3 ACL deny OK
- ACL IPv6+EH bridged -> routed, L2 ACL deny OK
- ACL IPv6+EH bridged -> routed, L3 ACL deny OK
- ACL IPv6+EH routed -> bridged, L2 ACL permit+reflect OK
- ACL IPv6+EH bridged -> routed, L2 ACL permit+reflect OK
- ACL IPv6+EH routed -> bridged, L3 ACL permit+reflect OK
- ACL IPv6+EH bridged -> routed, L3 ACL permit+reflect OK
- ACL IPv4+MF routed -> bridged, L2 ACL deny OK
- ACL IPv4+MF routed -> bridged, L3 ACL deny OK
- ACL IPv4+MF bridged -> routed, L2 ACL deny OK
- ACL IPv4+MF bridged -> routed, L3 ACL deny OK
- ACL IPv4+MF routed -> bridged, L2 ACL permit+reflect OK
- ACL IPv4+MF bridged -> routed, L2 ACL permit+reflect OK
- ACL IPv4+MF routed -> bridged, L3 ACL permit+reflect OK
- ACL IPv4+MF bridged -> routed, L3 ACL permit+reflect OK
-
- ==============================================================================
- ACL plugin connection-oriented extended testcases
- ==============================================================================
- Prepare the settings SKIP
- IPv4: Basic conn timeout test reflect on ingress SKIP
- IPv4: Basic conn timeout test reflect on egress SKIP
- IPv4: reflect egress, clear conn SKIP
- IPv4: reflect ingress, clear conn SKIP
- IPv4: Idle conn behind active conn, reflect on ingress SKIP
- IPv4: Idle conn behind active conn, reflect on egress SKIP
- IPv6: Basic conn timeout test reflect on ingress SKIP
- IPv6: Basic conn timeout test reflect on egress SKIP
- IPv6: reflect egress, clear conn SKIP
- IPv6: reflect ingress, clear conn SKIP
- IPv6: Idle conn behind active conn, reflect on ingress SKIP
- IPv6: Idle conn behind active conn, reflect on egress SKIP
- Prepare for TCP session tests SKIP
- IPv4: transient TCP session (incomplete 3WHS), ref. on ingress SKIP
- IPv4: transient TCP session (incomplete 3WHS), ref. on egress SKIP
- IPv4: established TCP session (complete 3WHS), ref. on ingress SKIP
- IPv4: established TCP session (complete 3WHS), ref. on egress SKIP
- IPv4: transient TCP session (3WHS,ACK,FINACK), ref. on ingress SKIP
- IPv4: transient TCP session (3WHS,ACK,FINACK), ref. on egress SKIP
- IPv6: transient TCP session (incomplete 3WHS), ref. on ingress SKIP
- IPv6: transient TCP session (incomplete 3WHS), ref. on egress SKIP
- IPv6: established TCP session (complete 3WHS), ref. on ingress SKIP
- IPv6: established TCP session (complete 3WHS), ref. on egress SKIP
- IPv6: transient TCP session (3WHS,ACK,FINACK), ref. on ingress SKIP
- IPv6: transient TCP session (3WHS,ACK,FINACK), ref. on egress SKIP
-
- ==============================================================================
- ACL on dot1q bridged subinterfaces Tests
- ==============================================================================
- IP4 ACL SubIf Dot1Q bridged traffic OK
- IP6 ACL SubIf Dot1Q bridged traffic OK
-
- ==============================================================================
- ACL on dot1ad bridged subinterfaces Tests
- ==============================================================================
- IP4 ACL SubIf Dot1AD bridged traffic OK
- IP6 ACL SubIf Dot1AD bridged traffic OK
-
- ==============================================================================
- ACL on dot1ad routed subinterfaces Tests
- ==============================================================================
- IP4 ACL SubIf Dot1AD routed traffic OK
- IP4 ACL SubIf wrong tags Dot1AD routed traffic OK
- IP6 ACL SubIf Dot1AD routed traffic OK
- IP6 ACL SubIf wrong tags Dot1AD routed traffic OK
-
- ==============================================================================
- ACL on dot1q routed subinterfaces Tests
- ==============================================================================
- IP4 ACL SubIf Dot1Q routed traffic OK
- IP4 ACL SubIf wrong tags Dot1Q routed traffic OK
- IP6 ACL SubIf Dot1Q routed traffic OK
- IP6 ACL SubIf wrong tags Dot1Q routed traffic OK
-
-APIs
-````
-::
-
- ==============================================================================
- VAPI test
- ==============================================================================
- run C VAPI tests SKIP
- run C++ VAPI tests SKIP
-
- ==============================================================================
- VPP Object Model Test
- ==============================================================================
- run C++ VOM tests SKIP
-
- ==============================================================================
- PAPI Test Case
- ==============================================================================
- show version OK
- show version - invalid parameters OK
- u8 array OK
-
- ==============================================================================
- PAPI Message parsing Test Case
- ==============================================================================
- New compound type with array OK
- Add new types OK
- Add new types 2 OK
- Add new message object OK
- New message with array OK
- Argument name OK
- VLA with aribtrary length field placement OK
- Message to byte encoding OK
- Nested array type OK
- Old style VLA array OK
- Old VLA compound type OK
- Old VLA array arbitrary placement OK
- Old VLA u32 OK
- Simple array OK
-
- ==============================================================================
- JVPP Core Test Case
- ==============================================================================
- JVPP Acl Callback Api Test Case OK
- JVPP Acl Future Api Test Case OK
- JVPP Core Callback Api Test Case OK
- JVPP Core Future Api Test Case OK
- JVPP Ioamexport Callback Api Test Case OK
- JVPP Ioamexport Future Api Test Case OK
- JVPP Ioampot Callback Api Test Case OK
- JVPP Ioampot Future Api Test Case OK
- JVPP Ioamtrace Callback Api Test Case OK
- JVPP Ioamtrace Future Api Test Case OK
- JVPP Snat Callback Api Test Case OK
- JVPP Snat Future Api Test Case OK
-
-ARP
-```
-::
-
- ==============================================================================
- ARP Test Case
- ==============================================================================
- ARP OK
- ARP Duplicates OK
- ARP Static OK
- ARP reply with VRRP virtual src hw addr OK
- MPLS OK
- Proxy ARP OK
- Interface Mirror Proxy ARP OK
-
- ==============================================================================
- L2BD arp termination Test Case
- ==============================================================================
- L2BD arp term - add 5 hosts, verify arp responses OK
- L2BD arp term - delete 3 hosts, verify arp responses OK
- L2BD arp term - recreate BD1, readd 3 hosts, verify arp responses OK
- L2BD arp term - 2 IP4 addrs per host OK
- L2BD arp term - create and update 10 IP4-mac pairs OK
- L2BD arp/ND term - hosts with both ip4/ip6 OK
- L2BD ND term - Add and Del hosts, verify ND replies OK
- L2BD ND term - Add and update IP+mac, verify ND replies OK
- L2BD arp term - send garps, verify arp event reports OK
- L2BD arp term - send duplicate garps, verify suppression OK
- L2BD arp term - disable ip4 arp events,send garps, verify no events OK
- L2BD ND term - send NS packets verify reports OK
- L2BD ND term - send duplicate ns, verify suppression OK
- L2BD ND term - disable ip4 arp events,send ns, verify no events OK
-
-BFD API
-````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD) - API
- ==============================================================================
- activate SHA1 authentication SKIP
- create BFD session using non-existent SHA1 (negative case) SKIP
- create a BFD session SKIP
- create IPv6 BFD session SKIP
- create a BFD session (SHA1) SKIP
- add SHA1 keys SKIP
- change SHA1 key SKIP
- deactivate SHA1 authentication SKIP
- create the same BFD session twice (negative case) SKIP
- create the same BFD session twice (negative case) (SHA1) SKIP
- modify BFD session parameters SKIP
- share single SHA1 key between multiple BFD sessions SKIP
-
-BFD Authentication
-``````````````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD) (SHA1 auth)
- ==============================================================================
- hold BFD session up SKIP
- hold BFD session up - meticulous auth SKIP
- session is not brought down by unauthenticated msg SKIP
- session is not brought down by msg with non-existent key-id SKIP
- session is not brought down by msg with wrong auth type SKIP
- simulate remote peer restart and resynchronization SKIP
- session is not kept alive by msgs with bad sequence numbers SKIP
- bring BFD session up SKIP
-
-BFD Authentication Change
-`````````````````````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD) (changing auth)
- ==============================================================================
- change auth key without disturbing session state (delayed) SKIP
- change auth key without disturbing session state (immediate) SKIP
- turn auth off without disturbing session state (delayed) SKIP
- turn auth off without disturbing session state (immediate) SKIP
- turn auth on without disturbing session state (delayed) SKIP
- turn auth on without disturbing session state (immediate) SKIP
-
-BFD CLI
-````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD) (CLI)
- ==============================================================================
- create/modify/delete IPv4 BFD UDP session SKIP
- create/modify/delete IPv6 BFD UDP session SKIP
- create/modify/delete IPv6 BFD UDP session (authenticated) SKIP
- create/modify/delete IPv4 BFD UDP session (authenticated) SKIP
- put session admin-up and admin-down SKIP
- turn authentication on and off SKIP
- turn authentication on and off (delayed) SKIP
- set/delete meticulous SHA1 auth key SKIP
- set/delete SHA1 auth key SKIP
- set/del udp echo source SKIP
- show commands SKIP
-
-BFD IPv4
-````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD)
- ==============================================================================
- put session admin-up and admin-down SKIP
- configuration change while peer in demand mode SKIP
- verify session goes down after inactivity SKIP
- echo function SKIP
- session goes down if echo function fails SKIP
- echo packets looped back SKIP
- echo function stops if echo source is removed SKIP
- echo function stops if peer sets required min echo rx zero SKIP
- hold BFD session up SKIP
- immediately honor remote required min rx reduction SKIP
- interface with bfd session deleted SKIP
- echo packets with invalid checksum don't keep a session up SKIP
- large remote required min rx interval SKIP
- modify detect multiplier SKIP
- modify session - double required min rx SKIP
- modify session - halve required min rx SKIP
- no periodic frames outside poll sequence if remote demand set SKIP
- test correct response to control frame with poll bit set SKIP
- test poll sequence queueing SKIP
- bring BFD session down SKIP
- bring BFD session up SKIP
- bring BFD session up - first frame looked up by address pair SKIP
- verify slow periodic control frames while session down SKIP
- stale echo packets don't keep a session up SKIP
- no packets when zero remote required min rx interval SKIP
-
-BFD IPv6
-````````
-::
-
- ==============================================================================
- Bidirectional Forwarding Detection (BFD) (IPv6)
- ==============================================================================
- echo function used SKIP
- echo packets looped back SKIP
- hold BFD session up SKIP
- interface with bfd session deleted SKIP
- bring BFD session up SKIP
- bring BFD session up - first frame looked up by address pair SKIP
-
- ==============================================================================
- BFD-FIB interactions (IPv6)
- ==============================================================================
- BFD-FIB interactions SKIP
-
-BIER - Bit Indexed Explicit Replication
-```````````````````````````````````````
-::
-
- ==============================================================================
- BIER Test Case
- ==============================================================================
- BIER end-to-end OK
- BIER head OK
- BIER head over UDP OK
- BIER midpoint OK
- BIER Tail OK
- BIER Tail over UDP OK
-
- ==============================================================================
- BIER FIB Test Case
- ==============================================================================
- BFIB Unit Tests OK
-
-Classifier
-``````````
-::
-
- ==============================================================================
- Classifier Test Case
- ==============================================================================
- IP ACL test OK
- MAC ACL test OK
- IP PBR test OK
-
-Container Integration
-`````````````````````
-::
-
- ==============================================================================
- Container integration extended testcases
- ==============================================================================
- IPv4 basic connectivity test SKIP
- IPv6 basic connectivity test SKIP
- Create loopbacks overlapping with remote addresses SKIP
- IPv4 local-spoof connectivity test SKIP
- IPv6 local-spoof connectivity test SKIP
- Configure container commands SKIP
- IPv4 test after configuring container SKIP
- IPv6 test after configuring container SKIP
- Unconfigure container commands SKIP
- IPv4 local-spoof after unconfig test SKIP
- IPv6 local-spoof after unconfig test SKIP
-
-CRUD Loopback
-`````````````
-::
-
- ==============================================================================
- CRUD Loopback
- ==============================================================================
- test_crud (test_interface_crud.TestLoopbackInterfaceCRUD) OK
- test_down (test_interface_crud.TestLoopbackInterfaceCRUD) OK
-
-DHCP
-````
-::
-
- ==============================================================================
- DHCP Test Case
- ==============================================================================
- DHCPv6 Proxy OK
- DHCP Client OK
- DHCPv4 Proxy OK
-
-Distributed Virtual Router
-``````````````````````````
-::
-
- ==============================================================================
- Distributed Virtual Router
- ==============================================================================
- Distributed Virtual Router OK
- L2 Emulation OK
-
-DS-Lite Softwire
-````````````````
-::
-
- ==============================================================================
- DS-Lite Test Cases
- ==============================================================================
- Test DS-Lite OK
-
-FIB
-```
-::
-
- ==============================================================================
- FIB Test Case
- ==============================================================================
- FIB Unit Tests OK
-
-Flowprobe
-`````````
-::
-
- ==============================================================================
- Re-enable Flowprobe feature
- ==============================================================================
- disable flowprobe feature after first packets and re-enable SKIP
-
- ==============================================================================
- collect information on Ethernet, IP4 and IP6 datapath (no timers)
- ==============================================================================
- no timers, one CFLOW packet, 9 Flows inside OK
- no timers, two CFLOW packets (mtu=256), 3 Flows in each OK
- L2 data on IP4 datapath OK
- L2 data on IP6 datapath OK
- L2 data on L2 datapath OK
- L3 data on IP4 datapath OK
- L3 data on IP6 datapath OK
- L3 data on L2 datapath OK
- L4 data on IP4 datapath OK
- L4 data on IP6 datapath OK
- L4 data on L2 datapath OK
- verify templates on IP6 datapath OK
- verify templates on IP4 datapath OK
- verify template on L2 datapath OK
-
- ==============================================================================
- Disable Flowprobe feature
- ==============================================================================
- disable flowprobe feature after first packets SKIP
-
- ==============================================================================
- Re-enable IPFIX
- ==============================================================================
- disable IPFIX after first packets and re-enable after few packets SKIP
-
- ==============================================================================
- Disable IPFIX
- ==============================================================================
- disable IPFIX after first packets SKIP
-
-Geneve Tunnels
-``````````````
-::
-
- ==============================================================================
- GENEVE Test Case
- ==============================================================================
- Decapsulation test OK
- Encapsulation test OK
- Multicast flood test OK
- Multicast receive test OK
- Unicast flood test OK
-
-GRE Tunnels
-```````````
-::
-
- ==============================================================================
- GRE Test Case
- ==============================================================================
- GRE IPv4 tunnel Tests OK
- GRE IPv6 tunnel Tests OK
- GRE tunnel L2 Tests OK
- GRE tunnel VRF Tests OK
-
-GTPU Tunnels
-````````````
-::
-
- ==============================================================================
- GTPU Test Case
- ==============================================================================
- Decapsulation test OK
- Encapsulation test OK
- Multicast flood test OK
- Multicast receive test OK
- Unicast flood test OK
-
-IP Multicast Routing
-````````````````````
-::
-
- ==============================================================================
- IP Multicast Test Case
- ==============================================================================
- IP Multicast Bi-directional OK
- IPv6 Multicast Replication OK
- IPv6 Multicast Replication in non-default table OK
- IP Multicast Replication OK
- IP Multicast Connected Source check OK
- IP Multicast Signal OK
- IP Multicast Replication in non-default table OK
-
-IPSec
-`````
-::
-
- ==============================================================================
- Basic test for IPSEC using AH transport and Tunnel mode
- ==============================================================================
- ipsec ah v4 transport basic test OK
- ipsec ah v4 transport burst test OK
- ipsec ah 4o4 tunnel basic test OK
- ipsec ah 4o4 tunnel burst test OK
-
- ==============================================================================
- Basic test for ipsec esp sanity - tunnel and transport modes.
- ==============================================================================
- ipsec esp v4 transport basic test OK
- ipsec esp v4 transport burst test OK
- ipsec esp 4o4 tunnel basic test OK
- ipsec esp 4o4 tunnel burst test OK
-
-IPv4 FIB CRUD
-`````````````
-::
-
- ==============================================================================
- FIB - add/update/delete - ip4 routes
- ==============================================================================
- Add 1k routes OK
- Delete 100 routes OK
- Add 1k routes OK
- Delete 1.5k routes OK
-
-IPv4 Routing
-````````````
-::
-
- ==============================================================================
- IPv4 Test Case
- ==============================================================================
- IPv4 FIB test OK
-
- ==============================================================================
- IPv4 routes via NULL
- ==============================================================================
- IP NULL route OK
-
- ==============================================================================
- IPv4 disabled
- ==============================================================================
- IP Disabled OK
-
- ==============================================================================
- IPv4 Subnets
- ==============================================================================
- IP Sub Nets OK
-
- ==============================================================================
- IPv4 VLAN-0
- ==============================================================================
- IP VLAN-0 OK
-
- ==============================================================================
- IPv4 Load-Balancing
- ==============================================================================
- IP Load-Balancing OK
-
- ==============================================================================
- IPv4 Deaggregate Routes
- ==============================================================================
- IP Deag Routes OK
-
- ==============================================================================
- IPv4 Input Exceptions
- ==============================================================================
- IP Input Exceptions OK
-
- ==============================================================================
- IPv4 Punt Police/Redirect
- ==============================================================================
- IP punt police and redirect OK
-
-IPv4 VRF Multi-instance
-```````````````````````
-::
-
- ==============================================================================
- IP4 VRF Multi-instance Test Case
- ==============================================================================
- IP4 VRF Multi-instance test 1 - create 5 BDs OK
- IP4 VRF Multi-instance test 2 - delete 2 VRFs OK
- IP4 VRF Multi-instance 3 - add 2 VRFs OK
- IP4 VRF Multi-instance test 4 - delete 4 VRFs OK
-
-IPv6 Routing
-````````````
-::
-
- ==============================================================================
- IPv6 Test Case
- ==============================================================================
- IPv6 FIB test OK
- IPv6 Neighbour Solicitation Exceptions OK
- ND Duplicates OK
- IPv6 Router Solicitation Exceptions OK
-
- ==============================================================================
- IPv6 Punt Police/Redirect
- ==============================================================================
- IP6 punt police and redirect OK
-
- ==============================================================================
- IPv6 disabled
- ==============================================================================
- IP Disabled OK
-
- ==============================================================================
- IPv6 ND ProxyTest Case
- ==============================================================================
- IPv6 Proxy ND OK
-
- ==============================================================================
- IPv6 Load-Balancing
- ==============================================================================
- IPv6 Load-Balancing OK
-
- ==============================================================================
- IPv6 routes via NULL
- ==============================================================================
- IP NULL route OK
-
- ==============================================================================
- IPv6 Input Exceptions
- ==============================================================================
- IP6 Input Exceptions OK
-
-IPv6 VRF Multi-instance
-```````````````````````
-::
-
- ==============================================================================
- IP6 VRF Multi-instance Test Case
- ==============================================================================
- IP6 VRF Multi-instance test 1 - create 4 VRFs OK
- IP6 VRF Multi-instance test 2 - reset 2 VRFs OK
- IP6 VRF Multi-instance 3 - add 2 VRFs OK
- IP6 VRF Multi-instance test 4 - reset 4 VRFs OK
-
-IRB Integrated Routing-Bridging
-```````````````````````````````
-::
-
- ==============================================================================
- IRB Test Case
- ==============================================================================
- IPv4 IRB test 1 OK
- IPv4 IRB test 2 OK
-
-Kube-proxy
-``````````
-::
-
- ==============================================================================
- Kube-proxy Test Case
- ==============================================================================
- Kube-proxy NAT44 OK
- Kube-proxy NAT46 SKIP
- Kube-proxy NAT64 SKIP
- Kube-proxy NAT66 SKIP
-
-L2 FIB CRUD
-```````````
-::
-
- ==============================================================================
- L2 FIB Test Case
- ==============================================================================
- L2 FIB - program 100 + 100 MACs OK
- L2 FIB - program 100 + delete 12 MACs OK
- L2 FIB - flush all OK
- L2 FIB - flush BD OK
- L2 FIB - flush interface OK
- L2 FIB - mac learning events OK
- L2 FIB - mac learning max macs in event OK
- L2 FIB - program 100 MACs OK
- L2 FIB - Program 10 MACs, learn 10 OK
-
-L2BD Multi-instance
-```````````````````
-::
-
- ==============================================================================
- L2BD Multi-instance Test Case
- ==============================================================================
- L2BD Multi-instance test 1 - create 5 BDs OK
- L2BD Multi-instance test 2 - update data of 5 BDs OK
- L2BD Multi-instance test 3 - delete 2 BDs OK
- L2BD Multi-instance test 4 - add 2 BDs OK
- L2BD Multi-instance test 5 - delete 5 BDs SKIP
-
-L2BD Switching
-``````````````
-::
-
- ==============================================================================
- L2BD Test Case
- ==============================================================================
- L2BD MAC learning dual-loop test OK
- L2BD MAC learning single-loop test OK
-
-L2XC Multi-instance
-```````````````````
-::
-
- ==============================================================================
- L2XC Multi-instance Test Case
- ==============================================================================
- L2XC Multi-instance test 1 - create 10 cross-connects OK
- L2XC Multi-instance test 2 - delete 4 cross-connects OK
- L2BD Multi-instance 3 - add new 4 cross-connects OK
- L2XC Multi-instance test 4 - delete 10 cross-connects OK
-
-L2XC Switching
-``````````````
-::
-
- ==============================================================================
- L2XC Test Case
- ==============================================================================
- L2XC dual-loop test OK
- L2XC single-loop test OK
-
-LISP Tunnels
-````````````
-::
-
- ==============================================================================
- Basic LISP test
- ==============================================================================
- Test case for basic encapsulation OK
-
-Load Balancer
-`````````````
-::
-
- ==============================================================================
- Load Balancer Test Case
- ==============================================================================
- Load Balancer IP4 GRE4 OK
- Load Balancer IP4 GRE6 OK
- Load Balancer IP6 GRE4 OK
- Load Balancer IP6 GRE6 OK
-
-MACIP Access Control
-````````````````````
-::
-
- ==============================================================================
- MACIP Tests
- ==============================================================================
- MACIP 10 ACLs each with 100+ entries OK
- MACIP 10 ACLs each with 100+ entries with IP4 traffic OK
- MACIP 10 ACLs each with 100+ entries with IP6 traffic OK
- MACIP ACL with 10 entries OK
- MACIP ACL with 100 entries OK
- MACIP ACL with 2 entries OK
- MACIP ACL with 20 entries OK
- MACIP ACL with 5 entries OK
- MACIP ACL with 50 entries OK
- MACIP 2 ACLs each with 100+ entries OK
- MACIP replace ACL OK
- MACIP ACL delete intf with acl OK
-
- ==============================================================================
- MACIP with IP6 traffic
- ==============================================================================
- IP6 MACIP exactMAC|exactIP ACL bridged traffic OK
- IP6 MACIP exactMAC|subnetIP ACL bridged traffic OK
- IP6 MACIP exactMAC|wildIP ACL bridged traffic OK
- IP6 MACIP oui_MAC|exactIP ACL bridged traffic OK
- IP6 MACIP ouiMAC|subnetIP ACL bridged traffic OK
- IP6 MACIP ouiMAC|wildIP ACL bridged traffic OK
- IP6 MACIP wildcardMAC|exactIP ACL bridged traffic OK
- IP6 MACIP wildcardMAC|subnetIP ACL bridged traffic OK
- IP6 MACIP wildcardMAC|wildIP ACL bridged traffic OK
- MACIP replace ACL with IP6 traffic OK
- IP6 MACIP exactMAC|exactIP ACL routed traffic OK
- IP6 MACIP exactMAC|subnetIP ACL routed traffic OK
- IP6 MACIP exactMAC|wildIP ACL routed traffic OK
- IP6 MACIP ouiMAC|exactIP ACL routed traffic OK
- IP6 MACIP ouiMAC|subnetIP ACL routed traffic OK
- IP6 MACIP ouiMAC|wildIP ACL routed traffic OK
- IP6 MACIP wildcardMAC|exactIP ACL routed traffic OK
- IP6 MACIP wildcardMAC|subnetIP ACL routed traffic OK
- IP6 MACIP wildcardMAC|wildIP ACL OK
-
- ==============================================================================
- MACIP with IP4 traffic
- ==============================================================================
- IP4 MACIP wildcardMAC|exactIP ACL bridged traffic OK
- IP4 MACIP exactMAC|exactIP ACL bridged traffic OK
- IP4 MACIP exactMAC|subnetIP ACL bridged traffic OK
- IP4 MACIP exactMAC|wildIP ACL bridged traffic OK
- IP4 MACIP ouiMAC|exactIP ACL bridged traffic OK
- IP4 MACIP ouiMAC|subnetIP ACL bridged traffic OK
- IP4 MACIP ouiMAC|wildIP ACL bridged traffic OK
- IP4 MACIP wildcardMAC|subnetIP ACL bridged traffic OK
- IP4 MACIP wildcardMAC|wildIP ACL bridged traffic OK
- MACIP replace ACL with IP4 traffic OK
- IP4 MACIP exactMAC|exactIP ACL routed traffic OK
- IP4 MACIP exactMAC|subnetIP ACL routed traffic OK
- IP4 MACIP exactMAC|wildIP ACL routed traffic OK
- IP4 MACIP ouiMAC|exactIP ACL routed traffic OK
- IP4 MACIP ouiMAC|subnetIP ACL routed traffic OK
- IP4 MACIP ouiMAC|wildIP ACL routed traffic OK
- IP4 MACIP wildcardMAC|exactIP ACL routed traffic OK
- IP4 MACIP wildcardMAC|subnetIP ACL routed traffic OK
- IP4 MACIP wildcardMAC|wildIP ACL OK
-
-MAP Softwires
-`````````````
-::
-
- ==============================================================================
- MAP Test Case
- ==============================================================================
- MAP-E OK
-
-MFIB Multicast FIB
-``````````````````
-::
-
- ==============================================================================
- MFIB Test Case
- ==============================================================================
- MFIB Unit Tests OK
-
-MPLS Switching
-``````````````
-::
-
- ==============================================================================
- MPLS-L2
- ==============================================================================
- Virtual Private LAN Service OK
- Virtual Private Wire Service OK
-
- ==============================================================================
- MPLS Test Case
- ==============================================================================
- MPLS Local Label Binding test OK
- MPLS Deagg OK
- MPLS label imposition test OK
- MPLS Interface Receive OK
- MPLS Multicast Head-end OK
- MPLS IPv4 Multicast Tail OK
- MPLS IPv6 Multicast Tail OK
- MPLS Multicast Mid Point OK
- MPLS label swap tests OK
- MPLS Tunnel Tests OK
- MPLS V4 Explicit NULL test OK
- MPLS V6 Explicit NULL test OK
-
- ==============================================================================
- MPLS PIC edge convergence
- ==============================================================================
- MPLS eBGP PIC edge convergence OK
- MPLS iBGP PIC edge convergence OK
- MPLSv6 eBGP PIC edge convergence OK
-
- ==============================================================================
- MPLS disabled
- ==============================================================================
- MPLS Disabled OK
-
-NAT44
-`````
-::
-
- ==============================================================================
- NAT44 Test Cases
- ==============================================================================
- Delete NAT44 session OK
- NAT44 dynamic translation test OK
- NAT44 handling of client packets with TTL=1 OK
- NAT44 handling of error responses to client packets with TTL=2 OK
- NAT44 handling of server packets with TTL=1 OK
- NAT44 handling of error responses to server packets with TTL=2 OK
- NAT44 interfaces without configured IP address OK
- NAT44 forwarding test OK
- NAT44 translate fragments arriving in order OK
- NAT44 translate fragments arriving out of order OK
- NAT44 hairpinning - 1:1 NAPT OK
- NAT44 hairpinning - 1:1 NAT OK
- 1:1 NAT translate packet with unknown protocol - hairpinning OK
- NAT44 translate packet with unknown protocol - hairpinning OK
- Identity NAT OK
- NAT44 multiple inside interfaces with overlapping address space OK
- Acquire NAT44 addresses from interface OK
- Identity NAT with addresses from interface OK
- Static mapping with addresses from interface OK
- IPFIX logging NAT addresses exhausted OK
- IPFIX logging NAT44 session created/delted OK
- MAX translations per user - recycle the least recently used OK
- NAT44 multiple non-overlapping address space inside interfaces OK
- One armed NAT44 OK
- NAT44 interface output feature (in2out postrouting) OK
- NAT44 interface output feature hairpinning (in2out postrouting) OK
- NAT44 interface output feature VRF aware (in2out postrouting) OK
- Ping internal host from outside network OK
- Ping NAT44 out interface from outside network OK
- NAT44 add pool addresses to FIB OK
- Port restricted NAT44 (MAP-E CE) OK
- NAT44 fragments hairpinning OK
- NAT44 set/get virtual fragmentation reassembly OK
- 1:1 NAT initialized from inside network OK
- NAT44 interfaces without configured IP address - 1:1 NAT OK
- NAT44 local service load balancing OK
- 1:1 NAT initialized from outside network OK
- 1:1 NAT translate packet with unknown protocol OK
- 1:1 NAT VRF awareness OK
- 1:1 NAPT initialized from inside network OK
- NAT44 interfaces without configured IP address - 1:1 NAPT OK
- 1:1 NAPT initialized from outside network OK
- Twice NAT44 OK
- Acquire twice NAT44 addresses from interface OK
- Twice NAT44 local service load balancing OK
- NAT44 translate packet with unknown protocol OK
- NAT44 tenant VRF independent address pool mode OK
- NAT44 tenant VRF aware address pool mode OK
-
- ==============================================================================
- Deterministic NAT Test Cases
- ==============================================================================
- Deterministic NAT translation test (TCP, UDP, ICMP) OK
- NAT plugin run deterministic mode OK
- Deterministic NAT multiple users OK
- Deterministic NAT maximum sessions per user limit SKIP
- Deterministic NAT session timeouts SKIP
- Set deterministic NAT timeouts OK
- Deterministic NAT TCP session close from inside network OK
- Deterministic NAT TCP session close from outside network OK
-
-NAT64
-`````
-::
-
- ==============================================================================
- NAT64 Test Cases
- ==============================================================================
- NAT64 dynamic translation test OK
- NAT64 translate fragments arriving in order OK
- NAT64 translate fragments arriving out of order OK
- NAT64 hairpinning OK
- NAT64 translate packet with unknown protocol - hairpinning OK
- NAT64 ICMP Error message translation OK
- Enable/disable NAT64 feature on the interface OK
- Acquire NAT64 pool addresses from interface OK
- One armed NAT64 OK
- Add/delete address to NAT64 pool OK
- NAT64 Network-Specific Prefix OK
- NAT64 fragments hairpinning OK
- NAT64 session timeout SKIP
- Set NAT64 timeouts OK
- NAT64 static translation test OK
- Add/delete static BIB entry OK
- NAT64 translate packet with unknown protocol OK
-
-P2P Ethernet Subinterface
-`````````````````````````
-::
-
- ==============================================================================
- P2P Ethernet tests
- ==============================================================================
- delete/create p2p subif OK
- create 1k of p2p subifs OK
-
- ==============================================================================
- P2P Ethernet IPv4 tests
- ==============================================================================
- receive ipv4 packet via p2p subinterface OK
- route rx packet not matching p2p subinterface OK
- send ip4 packet via p2p subinterface OK
- drop tx ip4 packet not matching p2p subinterface OK
-
- ==============================================================================
- P2P Ethernet IPv6 tests
- ==============================================================================
- receive ipv6 packet via p2p subinterface OK
- drop rx packet not matching p2p subinterface OK
- route rx ip6 packet not matching p2p subinterface OK
- send packet via p2p subinterface OK
- drop tx ip6 packet not matching p2p subinterface OK
- standard routing without p2p subinterfaces OK
-
-PPPoE Encapsulation
-```````````````````
-::
-
- ==============================================================================
- PPPoE Test Case
- ==============================================================================
- PPPoE Add Same Session Twice Test OK
- PPPoE Decap Test OK
- PPPoE Decap Multiple Sessions Test OK
- PPPoE Delete Same Session Twice Test OK
- PPPoE Encap Test OK
- PPPoE Encap Multiple Sessions Test OK
-
-SPAN Switch Port Analyzer
-`````````````````````````
-::
-
- ==============================================================================
- SPAN Test Case
- ==============================================================================
- SPAN device rx mirror OK
- SPAN l2 broadcast mirror OK
- SPAN l2 rx tx mirror OK
- SPAN l2 tx mirror OK
- SPAN l2 rx mirror OK
- SPAN l2 rx mirror into 1ad subif+vtr OK
- SPAN l2 rx mirror into 1q subif+vtr OK
- SPAN l2 rx mirror into gre-subif+vtr OK
- SPAN l2 rx mirror into vxlan OK
-
-SRv6 Routing
-````````````
-::
-
- ==============================================================================
- SRv6 Test Case
- ==============================================================================
- Test SRv6 End (without PSP) behavior. OK
- Test SRv6 End.DT4 behavior. OK
- Test SRv6 End.DT6 behavior. OK
- Test SRv6 End.DX2 behavior. OK
- Test SRv6 End.DX4 behavior. OK
- Test SRv6 End.DX6 behavior. OK
- Test SRv6 End.X (without PSP) behavior. OK
- Test SRv6 End.X with PSP behavior. OK
- Test SRv6 End with PSP behavior. OK
- Test SRv6 Transit.Encaps behavior for IPv6. OK
- Test SRv6 Transit.Encaps behavior for IPv4. OK
- Test SRv6 Transit.Encaps behavior for L2. SKIP
- Test SRv6 Transit.Insert behavior (IPv6 only). OK
- Test SRv6 Transit.Insert behavior (IPv6 only). OK
-
-TCP/IP Stack
-````````````
-::
-
- ==============================================================================
- TCP Test Case
- ==============================================================================
- TCP builtin client/server transfer OK
- TCP Unit Tests OK
-
-UDP Stack
-`````````
-::
-
- ==============================================================================
- UDP Encap Test Case
- ==============================================================================
- UDP Encap test OK
-
-VTR VLAN Tag Rewrites
-`````````````````````
-::
-
- ==============================================================================
- VTR Test Case
- ==============================================================================
- 1AD VTR pop 1 test OK
- 1AD VTR pop 2 test OK
- 1AD VTR push 1 1AD test OK
- 1AD VTR push 1 1Q test OK
- 1AD VTR push 2 1AD test OK
- 1AD VTR push 2 1Q test OK
- 1AD VTR translate 1 -> 1 1AD test OK
- 1AD VTR translate 1 -> 1 1Q test OK
- 1AD VTR translate 1 -> 2 1AD test OK
- 1AD VTR translate 1 -> 2 1Q test OK
- 1AD VTR translate 2 -> 1 1AD test OK
- 1AD VTR translate 2 -> 1 1Q test OK
- 1AD VTR translate 2 -> 2 1AD test OK
- 1AD VTR translate 2 -> 2 1Q test OK
- 1Q VTR pop 1 test OK
- 1Q VTR push 1 test OK
- 1Q VTR push 2 test OK
- 1Q VTR translate 1 -> 1 test OK
- 1Q VTR translate 1 -> 2 test OK
-
-VXLAN Tunnels
-`````````````
-::
-
- ==============================================================================
- VXLAN Test Case
- ==============================================================================
- Decapsulation test OK
- Encapsulation test OK
- Multicast flood test OK
- Multicast receive test OK
- Unicast flood test OK
-
-VXLAN-GPE Tunnels
-`````````````````
-::
-
- ==============================================================================
- VXLAN-GPE Test Case
- ==============================================================================
- Decapsulation test SKIP
- Encapsulation test SKIP
- Multicast flood test SKIP
- Multicast receive test SKIP
- Unicast flood test SKIP
-
-Other Tests
-```````````
-::
-
- ==============================================================================
- Ping Test Case
- ==============================================================================
- basic ping test OK
- burst ping test OK
-
- ==============================================================================
- Session Test Case
- ==============================================================================
- Session Unit Tests OK
-
- ==============================================================================
- Template verification, timer tests
- ==============================================================================
- timer less than template timeout OK
- timer greater than template timeout OK
- verify cflow packet fields OK
-
diff --git a/docs/report/dpdk_performance_tests/csit_release_notes.rst b/docs/report/dpdk_performance_tests/csit_release_notes.rst
deleted file mode 100644
index 413c7c3cea..0000000000
--- a/docs/report/dpdk_performance_tests/csit_release_notes.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-CSIT Release Notes
-==================
-
-Changes in CSIT |release|
--------------------------
-
-No code changes apart from bug fixes.
-
-Known Issues
-------------
-
-Here is the list of known issues in CSIT |release| for Testpmd performance tests:
-
-+---+---------------------------------------------------+------------+-----------------------------------------------------------------+
-| # | Issue | Jira ID | Description |
-+---+---------------------------------------------------+------------+-----------------------------------------------------------------+
-| 1 | Testpmd in 1t1c and 2t2c setups - large variation | CSIT-569 | Suspected NIC firmware or DPDK driver issue affecting NDR |
-| | of discovered NDR throughput values across | | throughput. Applies to XL710 and X710 NICs, no issues observed |
-| | multiple test runs with xl710 and x710 NICs. | | on x520 NICs. |
-+---+---------------------------------------------------+------------+-----------------------------------------------------------------+
-| 2 | Lower than expected NDR throughput with xl710 | CSIT-571 | Suspected NIC firmware or DPDK driver issue affecting NDR |
-| | and x710 NICs, compared to x520 NICs. | | throughput. Applies to XL710 and X710 NICs. |
-+---+---------------------------------------------------+------------+-----------------------------------------------------------------+
diff --git a/docs/report/dpdk_performance_tests/documentation.rst b/docs/report/dpdk_performance_tests/documentation.rst
deleted file mode 100644
index 47ec7cbe2a..0000000000
--- a/docs/report/dpdk_performance_tests/documentation.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Documentation
-=============
-
-`CSIT DPDK Performance Tests Documentation`_ contains detailed
-functional description and input parameters for each test case.
diff --git a/docs/report/dpdk_performance_tests/index.rst b/docs/report/dpdk_performance_tests/index.rst
deleted file mode 100644
index 44d34d3d4d..0000000000
--- a/docs/report/dpdk_performance_tests/index.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-DPDK Performance Tests
-======================
-
-.. toctree::
-
- overview
- csit_release_notes
- packet_throughput_graphs/index
- packet_latency_graphs/index
- test_environment
- documentation
-
diff --git a/docs/report/dpdk_performance_tests/overview.rst b/docs/report/dpdk_performance_tests/overview.rst
deleted file mode 100644
index 02651d7e53..0000000000
--- a/docs/report/dpdk_performance_tests/overview.rst
+++ /dev/null
@@ -1,240 +0,0 @@
-Overview
-========
-
-Tested Physical Topologies
---------------------------
-
-CSIT DPDK performance tests are executed on physical baremetal servers hosted
-by :abbr:`LF (Linux Foundation)` FD.io project. Testbed physical topology is
-shown in the figure below.::
-
- +------------------------+ +------------------------+
- | | | |
- | +------------------+ | | +------------------+ |
- | | | | | | | |
- | | <-----------------> | |
- | | DUT1 | | | | DUT2 | |
- | +--^---------------+ | | +---------------^--+ |
- | | | | | |
- | | SUT1 | | SUT2 | |
- +------------------------+ +------------------^-----+
- | |
- | |
- | +-----------+ |
- | | | |
- +------------------> TG <------------------+
- | |
- +-----------+
-
-SUT1 and SUT2 are two System Under Test servers (Cisco UCS C240, each with two
-Intel XEON CPUs); TG is a Traffic Generator (another Cisco UCS C240, with
-two Intel XEON CPUs). SUTs run the Testpmd/L3FWD SW application in Linux
-user-mode as a Device Under Test (DUT). TG runs TRex SW application as a packet
-Traffic Generator. Physical connectivity between SUTs and to TG is provided
-using different NIC models that need to be tested for performance. Currently
-installed and tested NIC models include:
-
-#. 2port10GE X520-DA2 Intel.
-#. 2port10GE X710 Intel.
-#. 2port10GE VIC1227 Cisco.
-#. 2port40GE VIC1385 Cisco.
-#. 2port40GE XL710 Intel.
-
-From SUT and DUT perspective, all performance tests involve forwarding packets
-between two physical Ethernet ports (10GE or 40GE). Due to the number of
-listed NIC models tested and available PCI slot capacity in SUT servers, in
-all of the above cases both physical ports are located on the same NIC. In
-some test cases this results in measured packet throughput being limited not
-by the DUT but by either the physical interface or the NIC capacity.
-
-Going forward, the CSIT project will be looking to add more hardware into FD.io
-performance labs to address larger scale multi-interface and multi-NIC
-performance testing scenarios.
-
-Note that reported DUT (DPDK) performance results are specific to the SUTs
-tested. Current :abbr:`LF (Linux Foundation)` FD.io SUTs are based on Intel
-XEON E5-2699v3 2.3GHz CPUs. SUTs with other CPUs are likely to yield different
-results. A good rule of thumb, that can be applied to estimate DPDK packet
-throughput for Phy-to-Phy (NIC-to-NIC, PCI-to-PCI) topology, is to expect
-the forwarding performance to be proportional to CPU core frequency,
-assuming the CPU is the only limiting factor and all other SUT parameters are
-equivalent to the FD.io CSIT environment. The same rule of thumb can also be
-applied to Phy-to-VM/LXC-to-Phy (NIC-to-VM/LXC-to-NIC) topology, but due to
-much higher dependency on intensive memory operations and sensitivity to Linux
-kernel scheduler settings and behaviour, this estimation may not always yield
-good enough accuracy.
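-
-As an illustration of this rule of thumb only (the helper below and the 3.0 GHz
-target frequency are hypothetical, not part of CSIT), a measured rate can be
-scaled by core frequency as follows:
-
-.. code-block:: python
-
-    # Hedged sketch: scales a measured packet rate by CPU core frequency,
-    # assuming the CPU is the only bottleneck (see the caveats above).
-    REFERENCE_FREQ_GHZ = 2.3  # LF FD.io SUTs: Intel XEON E5-2699v3
-
-    def estimate_pps(measured_pps, target_freq_ghz):
-        """Rough Phy-to-Phy throughput estimate for a different CPU clock."""
-        return measured_pps * (target_freq_ghz / REFERENCE_FREQ_GHZ)
-
-    # e.g. a 10.0 Mpps result scales to an estimated ~13.0 Mpps at 3.0 GHz
-    print(estimate_pps(10.0e6, 3.0))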
-
-For detailed :abbr:`LF (Linux Foundation)` FD.io test bed specification and
-physical topology please refer to `LF FD.io CSIT testbed wiki page
-<https://wiki.fd.io/view/CSIT/CSIT_LF_testbed>`_.
-
-Performance Tests Coverage
---------------------------
-
-Performance tests are split into two main categories:
-
-- Throughput discovery - discovery of packet forwarding rate using binary
-  search in accordance with :rfc:`2544` (a simplified sketch of the search
-  follows this list).
-
- - NDR - discovery of Non Drop Rate packet throughput, at zero packet loss;
- followed by one-way packet latency measurements at 10%, 50% and 100% of
- discovered NDR throughput.
- - PDR - discovery of Partial Drop Rate, with specified non-zero packet loss
- currently set to 0.5%; followed by one-way packet latency measurements at
- 100% of discovered PDR throughput.
-
-- Throughput verification - verification of packet forwarding rate against
- previously discovered throughput rate. These tests are currently done against
- 0.9 of reference NDR, with reference rates updated periodically.
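-
-For illustration, a simplified version of such a binary search is sketched
-below; it is not the actual CSIT implementation, and ``measure_loss_ratio()``
-is a hypothetical callable returning the observed loss ratio at a given
-offered rate.
-
-.. code-block:: python
-
-    # Simplified RFC 2544 style binary search (illustrative only).
-    def binary_search(measure_loss_ratio, max_rate_pps, loss_tolerance=0.0,
-                      precision_pps=10000):
-        lo, hi, best = 0.0, float(max_rate_pps), 0.0
-        while hi - lo > precision_pps:
-            rate = (lo + hi) / 2.0
-            if measure_loss_ratio(rate) <= loss_tolerance:
-                best, lo = rate, rate   # rate sustained, search higher
-            else:
-                hi = rate               # loss above tolerance, search lower
-        return best
-
-    # NDR search uses loss_tolerance=0.0, PDR search uses 0.005 (0.5%).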
-
-CSIT |release| includes the following performance test suites, listed per NIC type:
-
-- 2port10GE X520-DA2 Intel
-
- - **L2IntLoop** - L2 Interface Loop forwarding any Ethernet frames between
- two Interfaces.
-
-- 2port40GE XL710 Intel
-
- - **L2IntLoop** - L2 Interface Loop forwarding any Ethernet frames between
- two Interfaces.
-
-- 2port10GE X520-DA2 Intel
-
- - **IPv4 Routed Forwarding** - L3 IP forwarding of Ethernet frames between
- two Interfaces.
-
-Execution of performance tests takes time, especially the throughput discovery
-tests. Due to limited HW testbed resources available within FD.io labs hosted
-by the Linux Foundation, the number of tests for NICs other than X520 (a.k.a.
-Niantic) has been limited to a few baseline tests. Over time we expect the HW
-testbed resources to grow, and we will be adding a complete set of performance
-tests for all hardware models, to be executed regularly and/or continuously.
-
-Performance Tests Naming
-------------------------
-
-CSIT |release| follows a common structured naming convention for all performance
-and system functional tests, introduced in CSIT |release-1|.
-
-The naming should be intuitive for the majority of the tests. Complete description
-of CSIT test naming convention is provided on `CSIT test naming wiki
-<https://wiki.fd.io/view/CSIT/csit-test-naming>`_.
-
-Methodology: Multi-Core and Multi-Threading
--------------------------------------------
-
-**Intel Hyper-Threading** - CSIT |release| performance tests are executed with
-SUT servers' Intel XEON processors configured in Intel Hyper-Threading Disabled
-mode (BIOS setting). This is the simplest configuration used to establish
-baseline single-thread single-core application packet processing and forwarding
-performance. Subsequent releases of CSIT will add performance tests with Intel
-Hyper-Threading Enabled (requires BIOS settings change and hard reboot of
-server).
-
-**Multi-core Tests** - CSIT |release| multi-core tests are executed in the
-following Testpmd/L3FWD thread and core configurations:
-
-#. 1t1c - 1 pmd worker thread on 1 CPU physical core.
-#. 2t2c - 2 pmd worker threads on 2 CPU physical cores.
-
-Note that in many tests running Testpmd/L3FWD reaches tested NIC I/O bandwidth
-or packets-per-second limit.
-
-Methodology: Packet Throughput
-------------------------------
-
-Following values are measured and reported for packet throughput tests:
-
-- NDR binary search per :rfc:`2544`:
-
- - Packet rate: "RATE: <aggregate packet rate in packets-per-second> pps
- (2x <per direction packets-per-second>)"
- - Aggregate bandwidth: "BANDWIDTH: <aggregate bandwidth in Gigabits per
- second> Gbps (untagged)"
-
-- PDR binary search per :rfc:`2544`:
-
- - Packet rate: "RATE: <aggregate packet rate in packets-per-second> pps (2x
- <per direction packets-per-second>)"
- - Aggregate bandwidth: "BANDWIDTH: <aggregate bandwidth in Gigabits per
- second> Gbps (untagged)"
- - Packet loss tolerance: "LOSS_ACCEPTANCE <accepted percentage of packets
-    lost at PDR rate>"
-
-- NDR and PDR are measured for the following L2 frame sizes:
-
- - IPv4: 64B, 1518B, 9000B.
-
-All rates are reported from the external Traffic Generator perspective. A short
-note relating the reported packet rate and bandwidth figures follows.
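-
-Reported packet rate and bandwidth figures relate roughly as sketched below;
-whether the L1 per-frame overhead (preamble and IPG, 20B) is included is a
-convention of the reporting tooling, so treat the formula as illustrative only.
-
-.. code-block:: python
-
-    # Illustrative arithmetic only, not the CSIT reporting code.
-    def aggregate_gbps(aggregate_pps, frame_size_bytes, l1_overhead_bytes=0):
-        return aggregate_pps * (frame_size_bytes + l1_overhead_bytes) * 8 / 1e9
-
-    # e.g. 29.76 Mpps of 64B frames is ~15.2 Gbps of L2 frame bits,
-    # or ~20.0 Gbps on the wire when the 20B L1 overhead is counted.
-    print(aggregate_gbps(29.76e6, 64))      # ~15.24
-    print(aggregate_gbps(29.76e6, 64, 20))  # ~19.99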
-
-
-Methodology: Packet Latency
----------------------------
-
-TRex Traffic Generator (TG) is used for measuring latency of Testpmd/L3FWD DUTs.
-Reported latency values are measured using following methodology:
-
-- Latency tests are performed at 10% and 50% of the discovered NDR rate (non
-  drop rate) for each NDR throughput test and packet size (except IMIX).
-- TG sends dedicated latency streams, one per direction, each at the rate of
- 10kpps at the prescribed packet size; these are sent in addition to the main
- load streams.
-- TG reports min/avg/max latency values per stream direction, hence two sets
- of latency values are reported per test case; future release of TRex is
- expected to report latency percentiles.
-- Reported latency values are aggregate across two SUTs due to three node
- topology used for all performance tests; for per SUT latency, reported value
- should be divided by two.
-- 1usec is the measurement accuracy advertised by TRex TG for the setup used in
- FD.io labs used by CSIT project.
-- TRex setup introduces an always-on error of about 2*2usec per latency flow -
-  additional Tx/Rx interface latency induced by TRex SW writing and reading
- packet timestamps on CPU cores without HW acceleration on NICs closer to the
- interface line.
-
-Methodology: TRex Traffic Generator Usage
------------------------------------------
-
-The `TRex traffic generator <https://wiki.fd.io/view/TRex>`_ is used for all
-CSIT performance tests. TRex stateless mode is used to measure NDR and PDR
-throughputs using binary search (NDR and PDR discovery tests) and for quick
-checks of DUT performance against the reference NDRs (NDR check tests) for
-specific configuration.
-
-TRex is installed and run on the TG compute node. The typical procedure is:
-
-- If TRex is not already installed on the TG, it is installed in the
- suite setup phase - see `TRex intallation`_.
-- TRex configuration is set in its configuration file
- ::
-
- /etc/trex_cfg.yaml
-
-- TRex is started in the background mode
- ::
-
- $ sh -c 'cd <t-rex-install-dir>/scripts/ && sudo nohup ./t-rex-64 -i -c 7 --iom 0 > /tmp/trex.log 2>&1 &' > /dev/null
-
-- Traffic streams are dynamically prepared for each test, based on traffic
-  profiles. The traffic is sent and the statistics are obtained using
-  :command:`trex_stl_lib.api.STLClient`.
-
-**Measuring packet loss**
-
-- Create an instance of STLClient
-- Connect to the client
-- Add all streams
-- Clear statistics
-- Send the traffic for defined time
-- Get the statistics
-
-If a warm-up phase is required, the traffic is also sent before the test and
-the statistics from that phase are ignored. A minimal sketch of the above
-steps follows.
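-
-The example below is illustrative only and uses the TRex STL Python API; the
-server address, ports, rates and the stream definition are assumptions for
-illustration and differ from the actual CSIT traffic profiles.
-
-.. code-block:: python
-
-    # Hedged sketch only - real CSIT traffic profiles and options differ.
-    from trex_stl_lib.api import (STLClient, STLStream, STLPktBuilder,
-                                  STLTXCont)
-    from scapy.all import Ether, IP, UDP
-
-    c = STLClient(server="127.0.0.1")
-    c.connect()
-    c.reset(ports=[0, 1])
-
-    pkt = STLPktBuilder(pkt=Ether() / IP(src="10.0.0.1", dst="20.0.0.1") /
-                        UDP(dport=1024) / (18 * "x"))       # 64B frame
-    c.add_streams([STLStream(packet=pkt, mode=STLTXCont(pps=1000000))],
-                  ports=[0])
-
-    c.clear_stats()
-    c.start(ports=[0], duration=10)
-    c.wait_on_traffic(ports=[0])
-
-    stats = c.get_stats()
-    lost = stats[0]["opackets"] - stats[1]["ipackets"]
-    c.disconnect()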
-
-**Measuring latency**
-
-If latency measurement is requested, two more packet streams are created (one
-for each direction) with the TRex flow_stats parameter set to
-STLFlowLatencyStats. In that case, the returned statistics also include
-min/avg/max latency values.
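-
-A latency stream could be added to the example above roughly as follows; the
-pg_id value and the statistics field names are assumptions and may differ
-between TRex versions.
-
-.. code-block:: python
-
-    # Hedged sketch - exact latency statistics keys vary across TRex versions.
-    from trex_stl_lib.api import STLStream, STLTXCont, STLFlowLatencyStats
-
-    lat_stream = STLStream(packet=pkt,                  # pkt as built above
-                           mode=STLTXCont(pps=10000),   # 10kpps latency flow
-                           flow_stats=STLFlowLatencyStats(pg_id=7))
-    c.add_streams([lat_stream], ports=[0])
-
-    # after start() / wait_on_traffic(), latency is read per packet group id
-    lat = c.get_stats()["latency"][7]["latency"]
-    print(lat["total_min"], lat["average"], lat["total_max"])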
diff --git a/docs/report/dpdk_performance_tests/packet_latency_graphs/index.rst b/docs/report/dpdk_performance_tests/packet_latency_graphs/index.rst
deleted file mode 100644
index b99bc2b1f6..0000000000
--- a/docs/report/dpdk_performance_tests/packet_latency_graphs/index.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-Packet Latency Graphs
-=====================
-
-Plotted results are generated from a single execution of CSIT NDR discovery
-test. Box plots are used to show the Minimum, Median and Maximum packet
-latency per test.
-
-*Title of each graph* is a regex (regular expression) matching all
-throughput test cases plotted on this graph, *X-axis labels* are indices
-of individual test suites executed by
-`FD.io test executor dpdk performance jobs`_ that created result output file
-used as data source for the graph, *Y-axis labels* are measured packet Latency
-[uSec] values, and the *Graph legend* lists the plotted test suites and their
-indices. Latency is reported for concurrent symmetric bi-directional flows,
-separately for each direction: i) West-to-East:
-TGint1-to-SUT1-to-SUT2-to-TGint2, and ii) East-to-West:
-TGint2-to-SUT2-to-SUT1-to-TGint1.
-
-.. note::
-
- Test results have been generated by
- `FD.io test executor dpdk performance jobs`_ with Robot Framework result
- files csit-dpdk-perf-\*.zip `archived here <../../_static/archive/>`_.
-
-.. toctree::
-
- l2
- ip4
diff --git a/docs/report/dpdk_performance_tests/packet_latency_graphs/ip4.rst b/docs/report/dpdk_performance_tests/packet_latency_graphs/ip4.rst
deleted file mode 100644
index 0bccc4b920..0000000000
--- a/docs/report/dpdk_performance_tests/packet_latency_graphs/ip4.rst
+++ /dev/null
@@ -1,73 +0,0 @@
-IPv4 Routed-Forwarding
-======================
-
-This section includes summary graphs of L3FWD Phy-to-Phy packet latency with
-IPv4 routed forwarding, measured at 50% of discovered NDR throughput rate.
-Latency is reported for L3FWD running in multiple configurations of L3FWD pmd
-thread(s), a.k.a. L3FWD data plane thread(s), and their physical CPU core(s)
-placement.
-
-L3FWD packet latency - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-ipv4-ndrdisc-lat50.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-ipv4-ndrdisc-lat50}
- \label{fig:64B-1t1c-ipv4-ndrdisc-lat50-dpdk}
- \end{figure}
-
-*Figure 1. L3FWD 1thread 1core - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.*
-
-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -E '64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -E '64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc' *
-
-L3FWD packet latency - running in configuration of **two worker threads (2t)
-on two physical cores (2c)** - is presented in the figure below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-ipv4-ndrdisc-lat50.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-ipv4-ndrdisc-lat50}
- \label{fig:64B-2t2c-ipv4-ndrdisc-lat50-dpdk}
- \end{figure}
-
-*Figure 2. L3FWD 2thread 2core - packet latency for Phy-to-Phy IPv4 Routed-Forwarding.*
-
-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -E '64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -E '64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc' *
diff --git a/docs/report/dpdk_performance_tests/packet_latency_graphs/l2.rst b/docs/report/dpdk_performance_tests/packet_latency_graphs/l2.rst
deleted file mode 100644
index bcb7c4468c..0000000000
--- a/docs/report/dpdk_performance_tests/packet_latency_graphs/l2.rst
+++ /dev/null
@@ -1,74 +0,0 @@
-L2 Ethernet Interface Loop
-==========================
-
-This section includes summary graphs of Testpmd Phy-to-Phy packet
-latency with L2 Ethernet Interface Loop measured at 50% of discovered
-NDR throughput rate. Latency is reported for Testpmd running in multiple
-configurations of Testpmd pmd thread(s), a.k.a. Testpmd data plane
-thread(s), and their physical CPU core(s) placement.
-
-Testpmd packet latency - running in configuration of **one worker thread (1t) on one
-physical core (1c)** - is presented in the figure below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-l2-ndrdisc-lat50.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-l2-ndrdisc-lat50}
- \label{fig:64B-1t1c-l2-ndrdisc-lat50-dpdk}
- \end{figure}
-
-*Figure 1. Testpmd 1thread 1core - packet latency for Phy-to-Phy L2 Ethernet Looping.*
-
-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-1t1c-eth-l2xcbase-testpmd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-1t1c-eth-l2xcbase-testpmd-ndrdisc' *
-
-Testpmd packet latency - running in configuration of **two worker threads (2t)
-on two physical cores (2c)** - is presented in the figure below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-l2-ndrdisc-lat50.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-l2-ndrdisc-lat50}
- \label{fig:64B-2t2c-l2-ndrdisc-lat50-dpdk}
- \end{figure}
-
-*Figure 2. Testpmd 2thread 2core - packet latency for Phy-to-Phy L2 Ethernet Looping.*
-
-CSIT test cases used to generate results presented above can be found in CSIT
-git repository by filtering with specified regex as follows:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-2t2c-eth-l2xcbase-testpmd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-2t2c-eth-l2xcbase-testpmd-ndrdisc' *
diff --git a/docs/report/dpdk_performance_tests/packet_throughput_graphs/index.rst b/docs/report/dpdk_performance_tests/packet_throughput_graphs/index.rst
deleted file mode 100644
index c1fc3f1bbc..0000000000
--- a/docs/report/dpdk_performance_tests/packet_throughput_graphs/index.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-Packet Throughput Graphs
-========================
-
-Plotted results are generated by multiple executions of the same CSIT
-performance tests across three physical testbeds within LF FD.io labs.
-To provide a descriptive summary view, Box-and-Whisker plots are used to
-display variation in measured throughput values, without making any
-assumptions of the underlying statistical distribution.
-
-For each plotted test case, Box-and-Whisker plots show the quartiles
-(Min, 1st quartile / 25th percentile, 2nd quartile / 50th percentile /
-median, 3rd quartile / 75th percentile, Max) across the collected data set
-(data set size stated in the note below). Outliers are plotted as
-individual points. Min and max values are plotted as bottom and top
-whiskers respectively. 1st and 3rd quartiles are plotted as bottom and
-top edges of the box. If multiple samples match only two values, and all
-samples fall between them, then no whiskers are plotted. If all samples
-have the same value, only a horizontal line is plotted.
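-
-As an illustration of this presentation (not the CSIT plotting code itself), a
-similar Box-and-Whisker trace over raw per-run throughput samples could be
-produced with plotly as follows; the sample values and test name are made up.
-
-.. code-block:: python
-
-    # Illustrative sketch only - the CSIT report generator is more involved.
-    import plotly.graph_objs as go
-    import plotly.offline as plo
-
-    samples_mpps = [11.9, 12.1, 12.0, 11.8, 12.2, 12.0, 11.7, 12.1, 12.0, 11.9]
-
-    trace = go.Box(y=samples_mpps, name="64B-1t1c-eth-l2xcbase-ndrdisc",
-                   boxpoints="outliers")
-    layout = go.Layout(yaxis=dict(title="Packet Throughput [Mpps]"))
-    plo.plot(go.Figure(data=[trace], layout=layout),
-             filename="throughput-box.html", auto_open=False)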
-
-*Title of each graph* is a regex (regular expression) matching all
-throughput test cases plotted on this graph, *X-axis labels* are indices
-of individual test suites executed by
-`FD.io test executor dpdk performance jobs`_ that created result output
-files used as data sources for the graph, *Y-axis labels* are measured Packets
-Per Second [pps] values, and the *Graph legend* lists the plotted test suites
-and their indices.
-
-.. note::
-
- Test results have been generated by
- `FD.io test executor dpdk performance jobs`_ with Robot Framework result
- files csit-dpdk-perf-\*.zip `archived here <../../_static/archive/>`_.
- Plotted data set size per test case is equal to the number of job executions
- presented in this report version: **10**.
-
-.. toctree::
-
- l2
- ip4
diff --git a/docs/report/dpdk_performance_tests/packet_throughput_graphs/ip4.rst b/docs/report/dpdk_performance_tests/packet_throughput_graphs/ip4.rst
deleted file mode 100644
index 1ad08992bc..0000000000
--- a/docs/report/dpdk_performance_tests/packet_throughput_graphs/ip4.rst
+++ /dev/null
@@ -1,150 +0,0 @@
-IPv4 Routed-Forwarding
-======================
-
-Following sections include summary graphs of L3FWD Phy-to-Phy performance with
-packet routed forwarding, including NDR throughput (zero packet loss)
-and PDR throughput (<0.5% packet loss). Performance is reported for L3FWD
-running in multiple configurations of L3FWD pmd thread(s), a.k.a. L3FWD
-data plane thread(s), and their physical CPU core(s) placement.
-
-NDR Throughput
-~~~~~~~~~~~~~~
-
-L3FWD NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
-in the graph below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-ipv4-ndrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-ipv4-ndrdisc}
- \label{fig:64B-1t1c-ipv4-ndrdisc-dpdk}
- \end{figure}
-
-*Figure 1. L3FWD 1thread 1core - NDR Throughput for Phy-to-Phy IPv4
-Routed-Forwarding.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc' *
-
-L3FWD NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
-in the graph below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-ipv4-ndrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-ipv4-ndrdisc}
- \label{fig:64B-2t2c-ipv4-ndrdisc-dpdk}
- \end{figure}
-
-*Figure 2. L3FWD 2threads 2cores - NDR Throughput for Phy-to-Phy IPv4
-Routed-Forwarding.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc' *
-
-PDR Throughput
-~~~~~~~~~~~~~~
-
-L3FWD PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
-in the graph below. PDR measured for 0.5% packet loss ratio.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-ipv4-pdrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-ipv4-pdrdisc}
- \label{fig:64B-1t1c-ipv4-pdrdisc-dpdk}
- \end{figure}
-
-*Figure 3. L3FWD 1thread 1core - PDR Throughput for Phy-to-Phy IPv4
-Routed-Forwarding.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc' *
-
-L3FWD PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
-in the graph below. PDR measured for 0.5% packet loss ratio.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-ipv4-pdrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-ipv4-pdrdisc}
- \label{fig:64B-2t2c-ipv4-pdrdisc-dpdk}
- \end{figure}
-
-*Figure 4. L3FWD 2thread 2core - PDR Throughput for Phy-to-Phy IPv4
-Routed-Forwarding.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc' *
diff --git a/docs/report/dpdk_performance_tests/packet_throughput_graphs/l2.rst b/docs/report/dpdk_performance_tests/packet_throughput_graphs/l2.rst
deleted file mode 100644
index d96bf9f37d..0000000000
--- a/docs/report/dpdk_performance_tests/packet_throughput_graphs/l2.rst
+++ /dev/null
@@ -1,150 +0,0 @@
-L2 Ethernet Interface Loop
-==========================
-
-Following sections include summary graphs of DPDK Testpmd Phy-to-Phy performance
-with L2 Ethernet Interface Loop, including NDR throughput (zero packet loss)
-and PDR throughput (<0.5% packet loss). Performance is reported for Testpmd
-running in multiple configurations of Testpmd pmd thread(s), a.k.a. Testpmd
-data plane thread(s), and their physical CPU core(s) placement.
-
-NDR Throughput
-~~~~~~~~~~~~~~
-
-Testpmd NDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
-in the graph below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-l2-ndrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-l2-ndrdisc}
- \label{fig:64B-1t1c-l2-ndrdisc-dpdk}
- \end{figure}
-
-*Figure 1. Testpmd 1thread 1core - NDR Throughput for Phy-to-Phy L2 Ethernet
-Looping.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-1t1c-eth-l2xcbase-testpmd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-1t1c-eth-l2xcbase-testpmd-ndrdisc' *
-
-Testpmd NDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
-in the graph below.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-l2-ndrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-l2-ndrdisc}
- \label{fig:64B-2t2c-l2-ndrdisc-dpdk}
- \end{figure}
-
-*Figure 2. Testpmd 2threads 2cores - NDR Throughput for Phy-to-Phy L2 Ethernet
-Looping.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-2t2c-eth-l2xcbase-testpmd-ndrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-2t2c-eth-l2xcbase-testpmd-ndrdisc' *
-
-PDR Throughput
-~~~~~~~~~~~~~~
-
-Testpmd PDR 64B packet throughput in 1t1c setup (1thread, 1core) is presented
-in the graph below. PDR measured for 0.5% packet loss ratio.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-1t1c-l2-pdrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-1t1c-l2-pdrdisc}
- \label{fig:64B-1t1c-l2-pdrdisc-dpdk}
- \end{figure}
-
-*Figure 3. Testpmd 1thread 1core - PDR Throughput for Phy-to-Phy L2 Ethernet
-Looping.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-1t1c-eth-l2xcbase-testpmd-pdrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-1t1c-eth-l2xcbase-testpmd-pdrdisc' *
-
-Testpmd PDR 64B packet throughput in 2t2c setup (2thread, 2core) is presented
-in the graph below. PDR measured for 0.5% packet loss ratio.
-
-.. raw:: html
-
- <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/dpdk/64B-2t2c-l2-pdrdisc.html"></iframe>
-
-.. raw:: latex
-
- \begin{figure}[H]
- \centering
- \graphicspath{{../_build/_static/dpdk/}}
- \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{64B-2t2c-l2-pdrdisc}
- \label{fig:64B-2t2c-l2-pdrdisc-dpdk}
- \end{figure}
-
-*Figure 4. Testpmd 2thread 2core - PDR Throughput for Phy-to-Phy L2 Ethernet
-Looping.*
-
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/dpdk/perf && grep -P '64B-2t2c-eth-l2xcbase-testpmd-pdrdisc' *
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/dpdk/perf
- $ grep -P '64B-2t2c-eth-l2xcbase-testpmd-pdrdisc' *
diff --git a/docs/report/dpdk_performance_tests/test_environment.rst b/docs/report/dpdk_performance_tests/test_environment.rst
deleted file mode 100644
index eb6617b11c..0000000000
--- a/docs/report/dpdk_performance_tests/test_environment.rst
+++ /dev/null
@@ -1,68 +0,0 @@
-.. include:: ../vpp_performance_tests/test_environment_intro.rst
-
-.. include:: ../vpp_performance_tests/test_environment_sut_conf_1.rst
-
-.. include:: ../vpp_performance_tests/test_environment_sut_conf_3.rst
-
-
-DUT Configuration - DPDK
-------------------------
-
-**DPDK Version**
-
-|dpdk-release|
-
-**DPDK Compile Parameters**
-
-.. code-block:: bash
-
- make install T=x86_64-native-linuxapp-gcc -j
-
-**Testpmd Startup Configuration**
-
-Testpmd startup configuration changes per test case, with different settings
-for CPU cores and rx-queues. The startup config is aligned with the applied
-test case tag:
-
-Tagged by **1T1C**
-
-.. code-block:: bash
-
- testpmd -c 0x3 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=1 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=1 --txq=1 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start
-
-Tagged by **2T2C**
-
-.. code-block:: bash
-
- testpmd -c 0x403 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=2 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=1 --txq=1 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start
-
-Tagged by **4T4C**
-
-.. code-block:: bash
-
- testpmd -c 0xc07 -n 4 -- --numa --nb-ports=2 --portmask=0x3 --nb-cores=4 --max-pkt-len=9000 --txqflags=0 --forward-mode=io --rxq=2 --txq=2 --burst=64 --rxd=1024 --txd=1024 --disable-link-check --auto-start
-
-**L3FWD Startup Configuration**
-
-L3FWD startup configuration changes per test case, with different settings
-for CPU cores and rx-queues. The startup config is aligned with the applied
-test case tag:
-
-Tagged by **1T1C**
-
-.. code-block:: bash
-
- l3fwd -l 1 -n 4 -- -P -L -p 0x3 --config='${port_config}' --enable-jumbo --max-pkt-len=9000 --eth-dest=0,${adj_mac0} --eth-dest=1,${adj_mac1} --parse-ptype
-
-Tagged by **2T2C**
-
-.. code-block:: bash
-
- l3fwd -l 1,2 -n 4 -- -P -L -p 0x3 --config='${port_config}' --enable-jumbo --max-pkt-len=9000 --eth-dest=0,${adj_mac0} --eth-dest=1,${adj_mac1} --parse-ptype
-
-Tagged by **4T4C**
-
-.. code-block:: bash
-
- l3fwd -l 1,2,3,4 -n 4 -- -P -L -p 0x3 --config='${port_config}' --enable-jumbo --max-pkt-len=9000 --eth-dest=0,${adj_mac0} --eth-dest=1,${adj_mac1} --parse-ptype
-
-
-.. include:: ../vpp_performance_tests/test_environment_tg.rst
diff --git a/docs/report/honeycomb_performance_tests/csit_release_notes.rst b/docs/report/honeycomb_performance_tests/csit_release_notes.rst
deleted file mode 100644
index d79907d4ca..0000000000
--- a/docs/report/honeycomb_performance_tests/csit_release_notes.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-CSIT Release Notes
-==================
-
-Changes in CSIT |release|
--------------------------
-
-#. No changes since previous release.
-
-Known Issues
-------------
-
-Here is the list of known issues in CSIT |release| for Honeycomb performance
-tests in VIRL:
-
-+---+--------------------------------------------+------------+----------------------------------------------------------------------------+
-| # | Issue | Jira ID | Description |
-+---+--------------------------------------------+------------+----------------------------------------------------------------------------+
-| 1 | Intermittent failures in Honeycomb startup | HC2VPP-199 | During test setup Honeycomb sometimes fails to start. Does not appear to |
-| | | | affect measured results, only fails test execution ocassionally. |
-+---+--------------------------------------------+------------+----------------------------------------------------------------------------+
diff --git a/docs/report/honeycomb_performance_tests/documentation.rst b/docs/report/honeycomb_performance_tests/documentation.rst
deleted file mode 100644
index 6b15bde6ee..0000000000
--- a/docs/report/honeycomb_performance_tests/documentation.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Documentation
-=============
-
-`CSIT Honeycomb Performance Tests Documentation`_ contains detailed
-functional description and input parameters for each test case.
diff --git a/docs/report/honeycomb_performance_tests/index.rst b/docs/report/honeycomb_performance_tests/index.rst
deleted file mode 100644
index 3177494395..0000000000
--- a/docs/report/honeycomb_performance_tests/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Honeycomb Performance Tests
-===========================
-
-.. toctree::
-
- overview
- csit_release_notes
- test_environment
- documentation
- test_result_data
-
diff --git a/docs/report/honeycomb_performance_tests/overview.rst b/docs/report/honeycomb_performance_tests/overview.rst
deleted file mode 100644
index ee9788ed26..0000000000
--- a/docs/report/honeycomb_performance_tests/overview.rst
+++ /dev/null
@@ -1,122 +0,0 @@
-Overview
-========
-
-Tested Physical Topologies
---------------------------
-
-CSIT Honeycomb performance tests are executed on physical baremetal servers hosted by
-LF FD.io project. Testbed physical topology is shown in the figure below.::
-
- +------------------------+ +------------------------+
- | | | |
- | +------------------+ | | +------------------+ |
- | | | | | | | |
- | | <-----------------> | |
- | | DUT1 | | | | DUT2 | |
- | +--^---------------+ | | +---------------^--+ |
- | | | | | |
- | | SUT1 | | SUT2 | |
- +------------------------+ +------------------^-----+
- | |
- | |
- | +-----------+ |
- | | | |
- +------------------> TG <------------------+
- | |
- +-----------+
-
-SUT1 runs the VPP SW application in Linux user-mode as a
-Device Under Test (DUT), and a python script to generate traffic. SUT2 and TG
-are unused. Physical connectivity between SUTs and to the TG is provided using
-different NIC models, but it is not exercised by these tests.
-
-Performance tests involve sending Netconf requests over localhost to the
-Honeycomb listener port, and measuring response time.
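-
-A minimal sketch of such a timed read is shown below, assuming the ncclient
-library and Honeycomb's default Netconf-over-SSH listener; the port number and
-credentials are assumptions, not taken from this report, and the actual hc2vpp
-performance scripts differ.
-
-.. code-block:: python
-
-    # Hedged sketch - port 2831 and admin/admin are assumed Honeycomb
-    # defaults; the real hc2vpp performance scripts differ.
-    import time
-    from ncclient import manager
-
-    with manager.connect(host="127.0.0.1", port=2831, username="admin",
-                         password="admin", hostkey_verify=False) as m:
-        start = time.monotonic()
-        reply = m.get()  # simple synchronous read of operational data
-        elapsed = time.monotonic() - start
-        print("response time: %.3f ms" % (elapsed * 1000.0))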
-
-Note that reported performance results are specific to the SUTs tested.
-Current LF FD.io SUTs are based on Intel XEON E5-2699v3 2.3GHz CPUs. SUTs with
-other CPUs are likely to yield different results.
-
-For detailed LF FD.io test bed specification and physical topology please refer
-to `LF FDio CSIT testbed wiki page
-<https://wiki.fd.io/view/CSIT/CSIT_LF_testbed>`_.
-
-Performance Tests Coverage
---------------------------
-
-As of right now, there is only a single Honeycomb performance test. It measures
-response time for a simple read operation, performed synchronously and using
-single (not batch) requests.
-
-Currently the tests are not triggered automatically, but can be run on-demand from
-the hc2vpp project.
-
-Performance Tests Naming
-------------------------
-
-CSIT |release| follows a common structured naming convention for all
-performance and system functional tests, introduced in CSIT |release-1|.
-
-The naming should be intuitive for majority of the tests. Complete
-description of CSIT test naming convention is provided on `CSIT test naming wiki
-<https://wiki.fd.io/view/CSIT/csit-test-naming>`_.
-
-Here are a few illustrative examples of the new naming usage for performance test
-suites:
-
-#. **Physical port to physical port - a.k.a. NIC-to-NIC, Phy-to-Phy, P2P**
-
- - *PortNICConfig-WireEncapsulation-PacketForwardingFunction-
- PacketProcessingFunction1-...-PacketProcessingFunctionN-TestType*
- - *10ge2p1x520-dot1q-l2bdbasemaclrn-ndrdisc.robot* => 2 ports of 10GE on
- Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain baseline switching
- with MAC learning, NDR throughput discovery.
- - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-ndrchk.robot* => 2 ports of 10GE
- on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain baseline
-    switching with MAC learning, NDR throughput verification.
- - *10ge2p1x520-ethip4-ip4base-ndrdisc.robot* => 2 ports of 10GE on Intel
- x520 NIC, IPv4 baseline routed forwarding, NDR throughput discovery.
- - *10ge2p1x520-ethip6-ip6scale200k-ndrdisc.robot* => 2 ports of 10GE on
- Intel x520 NIC, IPv6 scaled up routed forwarding, NDR throughput
- discovery.
-
-#. **Physical port to VM (or VM chain) to physical port - a.k.a. NIC2VM2NIC,
- P2V2P, NIC2VMchain2NIC, P2V2V2P**
-
- - *PortNICConfig-WireEncapsulation-PacketForwardingFunction-
- PacketProcessingFunction1-...-PacketProcessingFunctionN-VirtEncapsulation-
- VirtPortConfig-VMconfig-TestType*
- - *10ge2p1x520-dot1q-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2 ports
- of 10GE on Intel x520 NIC, dot1q tagged Ethernet, L2 bridge-domain
- switching to/from two vhost interfaces and one VM, NDR throughput
- discovery.
- - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-2vhost-1vm-ndrdisc.robot* => 2
- ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain
- switching to/from two vhost interfaces and one VM, NDR throughput
- discovery.
- - *10ge2p1x520-ethip4vxlan-l2bdbasemaclrn-eth-4vhost-2vm-ndrdisc.robot* => 2
- ports of 10GE on Intel x520 NIC, IPv4 VXLAN Ethernet, L2 bridge-domain
- switching to/from four vhost interfaces and two VMs, NDR throughput
- discovery.
-
-Methodology: Multi-Core
------------------------
-
-**Multi-core Test** - CSIT |release| multi-core tests are executed in the
-following thread and core configurations:
-
-#. 1t - 1 Honeycomb Netconf thread on 1 CPU physical core.
-#. 8t - 8 Honeycomb Netconf threads on 8 CPU physical cores.
-#. 16t - 16 Honeycomb Netconf threads on 16 CPU physical cores.
-
-The traffic generator also uses multiple threads/cores to simulate multiple
-Netconf clients accessing the Honeycomb server.
-
-Methodology: Performance measurement
-------------------------------------
-
-The following values are measured and reported in tests:
-
-- Average request rate. Averaged over the entire test duration, over all client
- threads. Negative replies (if any) are not counted and are reported separately.
diff --git a/docs/report/honeycomb_performance_tests/test_environment.rst b/docs/report/honeycomb_performance_tests/test_environment.rst
deleted file mode 100644
index db187b2c1b..0000000000
--- a/docs/report/honeycomb_performance_tests/test_environment.rst
+++ /dev/null
@@ -1,22 +0,0 @@
-Test Environment
-================
-
-To execute performance tests, there are three identical testbeds; each testbed
-consists of two SUTs and one TG.
-
-Server HW Configuration
------------------------
-
-See `Performance HW Configuration <../vpp_performance_tests/test_environment.html>`_
-
-Additionally, configuration for the Honeycomb client:
-
-
-**Honeycomb Startup Command**
-
-Use the server-mode JIT compiler, increase the default heap and metaspace
-sizes, and enable NUMA optimizations for the JVM.
-
-::
-
- $ java -server -Xms128m -Xmx512m -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=512m -XX:+UseNUMA -XX:+UseParallelGC
diff --git a/docs/report/honeycomb_performance_tests/test_result_data.rst b/docs/report/honeycomb_performance_tests/test_result_data.rst
deleted file mode 100644
index 9e791d6e88..0000000000
--- a/docs/report/honeycomb_performance_tests/test_result_data.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-Test Result Data
-================
-
-This section includes a summary of Netconf read operation performance.
-Performance is reported for Honeycomb running in multiple configurations of
-netconf thread(s) and their physical CPU core(s) placement, and for different
-read operation targets.
-
-.. note::
-
- Test results have been generated by
- `FD.io test executor honeycomb performance jobs`_ with Robot Framework
- result files csit-vpp-perf-\*.zip `archived here <../../_static/archive/>`_.
-
-Honeycomb + Netconf
-===================
-
-No new data has been generated in the course of this release. Results from
-the last successful run are available in `17.07 release report <https://docs.fd.io/csit/rls1707/report/honeycomb_performance_tests/test_result_data.html>`_. \ No newline at end of file
diff --git a/docs/report/index.rst b/docs/report/index.rst
index bbcd5cbab8..abd4178833 100644
--- a/docs/report/index.rst
+++ b/docs/report/index.rst
@@ -1,18 +1,15 @@
-CSIT 18.01
-==========
+CSIT 18.01.2
+============
.. toctree::
:numbered:
introduction/index
vpp_performance_tests/index
- dpdk_performance_tests/index
- honeycomb_performance_tests/index
vpp_functional_tests/index
honeycomb_functional_tests/index
- vpp_unit_tests/index
nsh_sfc_functional_tests/index
detailed_test_results/index
test_configuration/index
test_operational_data/index
- csit_framework_documentation/index \ No newline at end of file
+ csit_framework_documentation/index
diff --git a/docs/report/introduction/general_notes.rst b/docs/report/introduction/general_notes.rst
index 994a53e6d0..48c20ad132 100644
--- a/docs/report/introduction/general_notes.rst
+++ b/docs/report/introduction/general_notes.rst
@@ -40,20 +40,6 @@ is listed separately, as follows:
orchestrated by Kubernetes, with `Ligato <https://github.com/ligato>`_ for
container networking. TRex is used as a traffic generator.
-#. **DPDK Performance** - VPP is using DPDK code to control and drive
- the NICs and physical interfaces. Tests are used as a baseline to
- profile performance of the DPDK sub-system. DPDK tests are executed in
- physical FD.io testbeds, focusing on Testpmd/L3FWD data plane performance for
- Phy-to-Phy (NIC-to-NIC). Tests cover a range of NICs, 10GE and 40GE
- interfaces, range of multi-thread and multi-core configurations.
- Testpmd/L3FWD application runs in host user-mode. TRex is used as a traffic
- generator.
-
-#. **Honeycomb Performance** - Honeycomb performance tests are executed in
- physical FD.io testbeds, focusing on the performance of Honeycomb management
- and programming functionality of VPP. Tests cover a range of CRUD operations
- executed against VPP.
-
#. **VPP Functional** - VPP functional tests are executed in virtual
FD.io testbeds focusing on VPP packet processing functionality, including
   network data plane and in-line control plane. Tests cover vNIC-to-vNIC
diff --git a/docs/report/introduction/overview.rst b/docs/report/introduction/overview.rst
index 22bc29f071..4e290639ff 100644
--- a/docs/report/introduction/overview.rst
+++ b/docs/report/introduction/overview.rst
@@ -36,21 +36,6 @@ CSIT |release| report contains following main sections and sub-sections:
test job executions; *Test Environment* - environment description;
*Documentation* - CSIT source code documentation for VPP performance tests.
-#. **DPDK Performance Tests** - DPDK performance tests executed in
- physical FD.io testbeds; *Overview* - tested topologies, test coverage;
- *CSIT Release Notes* - changes in CSIT |release|, any known CSIT issues;
- *Packet Throughput Graphs* and *Packet Latency Graphs*
- - plotted NDR, PDR throughput and latency results from multiple test job
- executions; *Test Environment* - environment description; *Documentation* -
- CSIT source code documentation for DPDK performance tests.
-
-#. **Honeycomb Performance Tests** - Honeycomb performance tests executed in
- physical FD.io testbeds; *Overview* - tested topologies, test coverage
- and naming specifics; *CSIT Release Notes* - changes in CSIT |release|,
- added tests, environment or methodology changes, known CSIT issues;
- *Test Environment* - environment description; *Documentation* - source
- code documentation for Honeycomb performance tests.
-
#. **VPP Functional Tests** - VPP functional tests executed in virtual
FD.io testbeds; *Overview* - tested virtual topologies, test coverage and
naming specifics; *CSIT Release Notes* - changes in CSIT |release|, added
@@ -65,12 +50,6 @@ CSIT |release| report contains following main sections and sub-sections:
   *Test Environment* - environment description;
*Documentation* - source code documentation for Honeycomb functional tests.
-#. **VPP Unit Tests** - refers to VPP functional unit tests executed as
- part of vpp make test verify option within the FD.io VPP project; listed in
- this report to give a more complete view about executed VPP functional tests;
- *Overview* - short overview of unit test framework and executed tests;
- *Documentation* - source code documentation of VPP unit tests.
-
#. **NSH_SFC Functional Tests** - NSH_SFC functional tests executed in
virtual FD.io testbeds; *Overview* - tested virtual topologies, test
coverage and naming specifics; *CSIT Release Notes* - changes in CSIT
@@ -80,7 +59,7 @@ CSIT |release| report contains following main sections and sub-sections:
#. **Detailed Test Results** - auto-generated results from CSIT jobs
executions using CSIT Robot Framework output files as source data; *VPP
- Performance Results*, *DPDK Performance Results*, *VPP Functional
+ Performance Results*, *VPP Functional
Results*, *Honeycomb Functional Results*, *VPPtest Functional Results*.
#. **Test Configuration** - auto-generated DUT configuration data from CSIT jobs
diff --git a/docs/report/test_configuration/index.rst b/docs/report/test_configuration/index.rst
index 3963896b82..8c231a85cd 100644
--- a/docs/report/test_configuration/index.rst
+++ b/docs/report/test_configuration/index.rst
@@ -7,4 +7,6 @@
.. toctree::
vpp_performance_configuration/index
+ vpp_mrr_configuration/index
vpp_functional_configuration/index
+ vpp_functional_configuration_centos/index
diff --git a/docs/report/test_configuration/vpp_functional_configuration/index.rst b/docs/report/test_configuration/vpp_functional_configuration/index.rst
index aab94bd31c..36f72eba04 100644
--- a/docs/report/test_configuration/vpp_functional_configuration/index.rst
+++ b/docs/report/test_configuration/vpp_functional_configuration/index.rst
@@ -1,5 +1,5 @@
-VPP Functional Test Configs
-===========================
+VPP Functional Test Configs - Ubuntu
+====================================
.. note::
diff --git a/docs/report/test_configuration/vpp_functional_configuration_centos/index.rst b/docs/report/test_configuration/vpp_functional_configuration_centos/index.rst
new file mode 100644
index 0000000000..cf416ac48e
--- /dev/null
+++ b/docs/report/test_configuration/vpp_functional_configuration_centos/index.rst
@@ -0,0 +1,12 @@
+VPP Functional Test Configs - CentOS
+====================================
+
+.. note::
+
+ Data sources for reported test results: i) `FD.io test executor vpp
+ functional jobs`_, ii) archived FD.io jobs test result `output files
+ <../../_static/archive/>`_.
+
+.. toctree::
+
+ vpp_functional_configuration_centos
diff --git a/docs/report/test_configuration/vpp_mrr_configuration/index.rst b/docs/report/test_configuration/vpp_mrr_configuration/index.rst
new file mode 100644
index 0000000000..72b6ec01cd
--- /dev/null
+++ b/docs/report/test_configuration/vpp_mrr_configuration/index.rst
@@ -0,0 +1,12 @@
+VPP MRR Test Configs
+====================
+
+.. note::
+
+ Data sources for reported test results: i) `FD.io test executor vpp
+ performance jobs`_, ii) archived FD.io jobs test result `output files
+ <../../_static/archive/>`_.
+
+.. toctree::
+
+ vpp_mrr_configuration
diff --git a/docs/report/vpp_performance_tests/impact_meltdown/index.rst b/docs/report/vpp_performance_tests/impact_meltdown/index.rst
deleted file mode 100644
index 35ecb7453c..0000000000
--- a/docs/report/vpp_performance_tests/impact_meltdown/index.rst
+++ /dev/null
@@ -1,164 +0,0 @@
-Impact of Meltdown Patches
-==========================
-
-Following sections list changes to VPP throughput performance after applying
-patches addressing security vulnerabilities referred to as Meltdown
-(Variant3: Rogue Data Cache Load). Incremental kernel patches are
-applied for Ubuntu 16.04LTS as documented on
-`Ubuntu SpectreAndMeltdown page <https://wiki.ubuntu.com/SecurityTeam/KnowledgeBase/SpectreAndMeltdown>`_.
-Detailed listing of used software versions and patches is documented in
-:ref:`test_environment`.
-
-NDR and PDR packet throughput results are compared for 1-core/1-thread,
-2-cores/2-threads and 4-cores/4-threads VPP configurations, with
-reference performance numbers coming from tests without the Meltdown
-patches. Tables show test results grouped into Best 20 changes (minimal
-performance impact), followed by Worst 20 changes (maximal performance
-impact). All results are also provided in downloadable CSV and pretty
-ASCII formats.
-
-NDR Throughput: Best 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-impact-ndr-1t1c-top.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-impact-ndr-1t1c-top.csv}
- }
-
-NDR Throughput: Worst 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-impact-ndr-1t1c-bottom.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-impact-ndr-1t1c-bottom.csv}
- }
-
-.. only:: html
-
- NDR Throughput: All Changes
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Complete results for all NDR tests are available in a CSV and pretty
- ASCII formats:
-
- - `csv format for 1t1c <../../_static/vpp/meltdown-impact-ndr-1t1c-full.csv>`_,
- - `csv format for 2t2c <../../_static/vpp/meltdown-impact-ndr-2t2c-full.csv>`_,
- - `csv format for 4t4c <../../_static/vpp/meltdown-impact-ndr-4t4c-full.csv>`_,
- - `pretty ASCII format for 1t1c <../../_static/vpp/meltdown-impact-ndr-1t1c-full.txt>`_,
- - `pretty ASCII format for 2t2c <../../_static/vpp/meltdown-impact-ndr-2t2c-full.txt>`_,
- - `pretty ASCII format for 4t4c <../../_static/vpp/meltdown-impact-ndr-4t4c-full.txt>`_.
-
-PDR Throughput: Best 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-impact-pdr-1t1c-top.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-impact-pdr-1t1c-top.csv}
- }
-
-PDR Throughput: Worst 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-impact-pdr-1t1c-bottom.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-impact-pdr-1t1c-bottom.csv}
- }
-
-.. only:: html
-
-PDR Throughput: All Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Complete results for all PDR tests are available in a CSV and pretty
- ASCII formats:
-
- - `csv format for 1t1c <../../_static/vpp/meltdown-impact-pdr-1t1c-full.csv>`_,
- - `csv format for 2t2c <../../_static/vpp/meltdown-impact-pdr-2t2c-full.csv>`_,
- - `csv format for 4t4c <../../_static/vpp/meltdown-impact-pdr-4t4c-full.csv>`_,
- - `pretty ASCII format for 1t1c <../../_static/vpp/meltdown-impact-pdr-1t1c-full.txt>`_,
- - `pretty ASCII format for 2t2c <../../_static/vpp/meltdown-impact-pdr-2t2c-full.txt>`_,
- - `pretty ASCII format for 4t4c <../../_static/vpp/meltdown-impact-pdr-4t4c-full.txt>`_.
diff --git a/docs/report/vpp_performance_tests/impact_spectreandmeltdown/index.rst b/docs/report/vpp_performance_tests/impact_spectreandmeltdown/index.rst
deleted file mode 100644
index eec38e8698..0000000000
--- a/docs/report/vpp_performance_tests/impact_spectreandmeltdown/index.rst
+++ /dev/null
@@ -1,167 +0,0 @@
-Impact of SpectreAndMeltdown Patches
-====================================
-
-Following sections list changes to VPP throughput performance after
-applying patches addressing security vulnerabilities referred to as:
-Meltdown (Variant3: Rogue Data Cache Load) and Spectre (Variant1: Bounds
-Check Bypass; Variant2: Branch Target Injection) security
-vulnerabilities. Incremental kernel patches for Ubuntu 16.04 LTS as
-documented on
-`Ubuntu SpectreAndMeltdown page <https://wiki.ubuntu.com/SecurityTeam/KnowledgeBase/SpectreAndMeltdown>`_.
-For Spectre additional Processor microcode and BIOS firmware changes are
-applied. Detailed listing of used software versions and patches is
-documented in :ref:`test_environment`.
-
-NDR and PDR packet throughput results are compared for 1-core/1-thread,
-2-cores/2-threads and 4-cores/4-threads VPP configurations, with
-reference performance numbers coming from tests without the Meltdown
-patches. Tables show test results grouped into Best 20 changes (minimal
-performance impact), followed by Worst 20 changes (maximal performance
-impact). All results are also provided in downloadable CSV and pretty
-ASCII formats.
-
-NDR Throughput: Best 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-spectre-impact-ndr-1t1c-top.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-spectre-impact-ndr-1t1c-top.csv}
- }
-
-NDR Throughput: Worst 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-spectre-impact-ndr-1t1c-bottom.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-spectre-impact-ndr-1t1c-bottom.csv}
- }
-
-.. only:: html
-
- NDR Throughput: All Changes
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Complete results for all NDR tests are available in a CSV and pretty
- ASCII formats:
-
- - `csv format for 1t1c <../../_static/vpp/meltdown-spectre-impact-ndr-1t1c-full.csv>`_,
- - `csv format for 2t2c <../../_static/vpp/meltdown-spectre-impact-ndr-2t2c-full.csv>`_,
- - `csv format for 4t4c <../../_static/vpp/meltdown-spectre-impact-ndr-4t4c-full.csv>`_,
- - `pretty ASCII format for 1t1c <../../_static/vpp/meltdown-spectre-impact-ndr-1t1c-full.txt>`_,
- - `pretty ASCII format for 2t2c <../../_static/vpp/meltdown-spectre-impact-ndr-2t2c-full.txt>`_,
- - `pretty ASCII format for 4t4c <../../_static/vpp/meltdown-spectre-impact-ndr-4t4c-full.txt>`_.
-
-PDR Throughput: Best 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-spectre-impact-pdr-1t1c-top.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-spectre-impact-pdr-1t1c-top.csv}
- }
-
-PDR Throughput: Worst 20 Changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. only:: html
-
- .. csv-table::
- :align: center
- :file: ../../../../_build/_static/vpp/meltdown-spectre-impact-pdr-1t1c-bottom.csv
-
-.. only:: latex
-
- .. raw:: latex
-
- \makeatletter
- \csvset{
- perfimprovements column width/.style={after head=\csv@pretable\begin{longtable}{m{4cm} m{#1} m{#1} m{#1} m{#1} m{#1}}\csv@tablehead},
- }
- \makeatother
-
- {\tiny
- \csvautobooklongtable[separator=comma,
- respect all,
- no check column count,
- perfimprovements column width=1cm,
- late after line={\\\hline},
- late after last line={\end{longtable}}
- ]{../_build/_static/vpp/meltdown-spectre-impact-pdr-1t1c-bottom.csv}
- }
-
-.. only:: html
-
- PDR Throughput: All Changes
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Complete results for all PDR tests are available in a CSV and pretty
- ASCII formats:
-
- - `csv format for 1t1c <../../_static/vpp/meltdown-spectre-impact-pdr-1t1c-full.csv>`_,
- - `csv format for 2t2c <../../_static/vpp/meltdown-spectre-impact-pdr-2t2c-full.csv>`_,
- - `csv format for 4t4c <../../_static/vpp/meltdown-spectre-impact-pdr-4t4c-full.csv>`_,
- - `pretty ASCII format for 1t1c <../../_static/vpp/meltdown-spectre-impact-pdr-1t1c-full.txt>`_,
- - `pretty ASCII format for 2t2c <../../_static/vpp/meltdown-spectre-impact-pdr-2t2c-full.txt>`_,
- - `pretty ASCII format for 4t4c <../../_static/vpp/meltdown-spectre-impact-pdr-4t4c-full.txt>`_.
diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst
index 50074e1b6a..4507978b66 100644
--- a/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst
+++ b/docs/report/vpp_performance_tests/packet_latency_graphs/index.rst
@@ -27,6 +27,7 @@ TGint2-to-SUT2-to-SUT1-to-TGint1.
l2
ip4
ip6
+ srv6
ip4_tunnels
ip6_tunnels
vm_vhost
diff --git a/docs/report/vpp_performance_tests/packet_latency_graphs/srv6.rst b/docs/report/vpp_performance_tests/packet_latency_graphs/srv6.rst
new file mode 100644
index 0000000000..163d892cde
--- /dev/null
+++ b/docs/report/vpp_performance_tests/packet_latency_graphs/srv6.rst
@@ -0,0 +1,48 @@
+SRv6
+====
+
+This section includes summary graphs of VPP Phy-to-Phy packet latency
+with SRv6 measured at 50% of discovered NDR throughput
+rate. Latency is reported for VPP running in multiple configurations of
+VPP worker thread(s), a.k.a. VPP data plane thread(s), and their
+physical CPU core(s) placement.
+
+VPP packet latency in 1t1c setup (1thread, 1core) is presented in the graph below.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-1t1c-ethip6-srv6-ndrdisc-lat50.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-1t1c-ethip6-srv6-ndrdisc-lat50}
+ \label{fig:78B-1t1c-ethip6-srv6-ndrdisc-lat50}
+ \end{figure}
+
+*Figure 1. VPP 1thread 1core - packet latency for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
+
+VPP packet latency in 2t2c setup (2thread, 2core) is presented in the graph below.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-2t2c-ethip6-srv6-ndrdisc-lat50.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-2t2c-ethip6-srv6-ndrdisc-lat50}
+ \label{fig:78B-2t2c-ethip6-srv6-ndrdisc-lat50}
+ \end{figure}
+
+*Figure 2. VPP 2threads 2cores - packet latency for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst
index 6bfe4fe1af..ec3c9f9173 100644
--- a/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/index.rst
@@ -39,6 +39,7 @@ and their indices.
l2
ip4
ip6
+ srv6
ip4_tunnels
ip6_tunnels
vm_vhost
diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/srv6.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/srv6.rst
new file mode 100644
index 0000000000..0df40ac636
--- /dev/null
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/srv6.rst
@@ -0,0 +1,99 @@
+SRv6
+====
+
+Following sections include summary graphs of VPP Phy-to-Phy performance
+with SRv6, including NDR throughput (zero packet loss)
+and PDR throughput (<0.5% packet loss). Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
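+
+For orientation, the sketch below shows how such rates can be discovered by
+a binary search over the offered load. It is illustrative only; the helper
+``measure_loss_ratio()`` and the parameter values are hypothetical and do
+not reflect the CSIT implementation.
+
+.. code-block:: python
+
+    def discover_rate(measure_loss_ratio, line_rate, allowed_loss, steps=10):
+        """Return the highest rate whose loss ratio stays within
+        allowed_loss (0.0 for NDR, 0.005 for PDR)."""
+        lo, hi = 0.0, line_rate
+        for _ in range(steps):
+            rate = (lo + hi) / 2
+            if measure_loss_ratio(rate) <= allowed_loss:
+                lo = rate   # loss criterion met, try a higher rate
+            else:
+                hi = rate   # too much loss, back off
+        return lo
+
+    # ndr = discover_rate(measure_loss_ratio, line_rate, allowed_loss=0.0)
+    # pdr = discover_rate(measure_loss_ratio, line_rate, allowed_loss=0.005)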
+
+NDR Throughput
+~~~~~~~~~~~~~~
+
+VPP NDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-1t1c-ethip6-srv6-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-1t1c-ethip6-srv6-ndrdisc}
+ \label{fig:78B-1t1c-ethip6-srv6-ndrdisc}
+ \end{figure}
+
+*Figure 1. VPP 1thread 1core - NDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
+
+VPP NDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-2t2c-ethip6-srv6-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-2t2c-ethip6-srv6-ndrdisc}
+ \label{fig:78B-2t2c-ethip6-srv6-ndrdisc}
+ \end{figure}
+
+*Figure 2. VPP 2threads 2cores - NDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
+
+PDR Throughput
+~~~~~~~~~~~~~~
+
+VPP PDR 78B packet throughput in 1t1c setup (1thread, 1core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-1t1c-ethip6-srv6-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-1t1c-ethip6-srv6-pdrdisc}
+ \label{fig:78B-1t1c-ethip6-srv6-pdrdisc}
+ \end{figure}
+
+*Figure 3. VPP 1thread 1core - PDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
+
+VPP PDR 78B packet throughput in 2t2c setup (2thread, 2core) is presented
+in the graph below. PDR measured for 0.5% packet loss ratio.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/78B-2t2c-ethip6-srv6-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{78B-2t2c-ethip6-srv6-pdrdisc}
+ \label{fig:78B-2t2c-ethip6-srv6-pdrdisc}
+ \end{figure}
+
+*Figure 4. VPP 2threads 2cores - PDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_memif.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_memif.rst
new file mode 100644
index 0000000000..4fb8791dff
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_memif.rst
@@ -0,0 +1,66 @@
+Container memif Connections
+===========================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio.
+Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
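+
+As an illustration of how the reported speedup ratio is derived, a minimal
+sketch follows (the throughput numbers are hypothetical, not measured
+results):
+
+.. code-block:: python
+
+    # Hypothetical NDR throughput in Mpps for one test case.
+    throughput = {"1t1c": 4.2, "2t2c": 8.1, "4t4c": 15.6}
+
+    # Each multi-core result is normalized to the 1t1c reference;
+    # perfect linear scaling would give 2.0 for 2t2c and 4.0 for 4t4c.
+    speedup = {cores: value / throughput["1t1c"]
+               for cores, value in throughput.items()}
+    print(speedup)  # -> 1t1c: 1.0, 2t2c: ~1.93, 4t4c: ~3.71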
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-container-memif-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-container-memif-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-64B-container-memif-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/container_memif?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-container-memif-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-container-memif-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-64B-container-memif-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/container_memif?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_orchestrated.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_orchestrated.rst
new file mode 100644
index 0000000000..4e8ff4cd25
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/container_orchestrated.rst
@@ -0,0 +1,110 @@
+Container Orchestrated Topologies
+=================================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio.
+Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 and 10ge2p1x710 network interface cards.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/kubernetes/perf/container_memif?h=rls1804>`_.
+
+NIC 10ge2p1x710
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc}
+ \label{fig:10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/kubernetes/perf/container_memif?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 and 10ge2p1x710 network interface cards.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 3. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/kubernetes/perf/container_memif?h=rls1804>`_.
+
+NIC 10ge2p1x710
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc}
+ \label{fig:10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 4. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy L2 Ethernet Switching (base).*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/kubernetes/perf/container_memif?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/index.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/index.rst
index ddce548b6c..dca77c5b7b 100644
--- a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/index.rst
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/index.rst
@@ -1,3 +1,5 @@
+.. _throughput_speedup_multi_core:
+
Throughput Speedup Multi-Core
=============================
@@ -18,3 +20,10 @@ threaded VPP configurations relative to 1-core configurations.
l2
ip4
ip6
+ srv6
+ ip4_tunnels
+ ip6_tunnels
+ vm_vhost
+ container_memif
+ container_orchestrated
+ ipsec
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4.rst
index af9bd255be..0b2b7ef791 100644
--- a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4.rst
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4.rst
@@ -31,24 +31,12 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-64B-ip4-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip4 && grep -E '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip4
- $ grep -E '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
-
*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip4?h=rls1804>`_.
+
NIC 40ge2p1xl710
~~~~~~~~~~~~~~~~
@@ -65,24 +53,12 @@ NIC 40ge2p1xl710
\label{fig:40ge2p1xl710-64B-ip4-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip4 && grep -P '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 40ge2p1xl710*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip4
- $ grep -P '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 40ge2p1xl710*
-
*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip4?h=rls1804>`_.
+
PDR Throughput
--------------
@@ -106,20 +82,8 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-64B-ip4-tsa-pdrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip4 && grep -E '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip4
- $ grep -E '64B-(1t1c|2t2c|4t4c)-ethip4-ip4(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
-
*Figure 3. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
PDR Throughput for Phy-to-Phy IPv4 Routed-Forwarding.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip4?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4_tunnels.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4_tunnels.rst
new file mode 100644
index 0000000000..001584b856
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip4_tunnels.rst
@@ -0,0 +1,66 @@
+IPv4 Overlay Tunnels
+====================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio.
+Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-ethip4-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-ethip4-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-64B-ethip4-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip4_tunnels?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-ethip4-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-ethip4-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-64B-ethip4-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy IPv4 Overlay Tunnels.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip4_tunnels?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6.rst
index 077b330652..e8c70d0c18 100644
--- a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6.rst
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6.rst
@@ -32,24 +32,12 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-78B-ip6-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip6 && grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip6
- $ grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-ndrdisc' 10ge2p1x520*
-
*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip6?h=rls1804>`_.
+
NIC 40ge2p1xl710
~~~~~~~~~~~~~~~~
@@ -66,24 +54,12 @@ NIC 40ge2p1xl710
\label{fig:40ge2p1xl710-78B-ip6-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip6 && grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-ndrdisc' 40ge2p1xl710*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip6
- $ grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-ndrdisc' 40ge2p1xl710*
-
*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip6?h=rls1804>`_.
+
PDR Throughput
--------------
@@ -107,20 +83,8 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-78B-ip6-tsa-pdrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/ip6 && grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-pdrdisc' 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ cd tests/vpp/perf/ip6
- $ grep -E '78B-(1t1c|2t2c|4t4c)-ethip6-ip6(base|scale[a-z0-9]*)*-pdrdisc' 10ge2p1x520*
-
*Figure 3. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
PDR Throughput for Phy-to-Phy IPv6 Routed-Forwarding.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip6?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6_tunnels.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6_tunnels.rst
new file mode 100644
index 0000000000..3965242e8c
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ip6_tunnels.rst
@@ -0,0 +1,66 @@
+IPv6 Overlay Tunnels
+====================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio.
+Performance is reported for VPP
+running in multiple configurations of VPP worker thread(s), a.k.a. VPP
+data plane thread(s), and their physical CPU core(s) placement.
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-78B-ethip6-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-78B-ethip6-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-78B-ethip6-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip6_tunnels?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-78B-ethip6-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-78B-ethip6-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-78B-ethip6-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy IPv6 Overlay Tunnels.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/ip6_tunnels?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ipsec.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ipsec.rst
new file mode 100644
index 0000000000..a5a4d7d1bc
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/ipsec.rst
@@ -0,0 +1,71 @@
+IPSec Crypto HW: IP4 Routed-Forwarding
+======================================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio.
+VPP IPSec encryption is accelerated using DPDK cryptodev
+library driving Intel Quick Assist (QAT) crypto PCIe hardware cards.
+Performance is reported for VPP running in multiple configurations of
+VPP worker thread(s), a.k.a. VPP data plane thread(s), and their
+physical CPU core(s) placement.
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 40ge2p1xl710 network interface card.
+
+NIC 40ge2p1xl710
+~~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-ipsechw-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-ipsechw-tsa-ndrdisc}
+ \label{fig:40ge2p1xl710-64B-ipsechw-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy IPSEC HW.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/crypto?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 40ge2p1xl710 network interface card.
+
+NIC 40ge2p1xl710
+~~~~~~~~~~~~~~~~
+
+PDR is measured for 0.5% packet loss ratio.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-ipsechw-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-ipsechw-tsa-pdrdisc}
+ \label{fig:40ge2p1xl710-64B-ipsechw-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy IPSEC HW.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/crypto?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/l2.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/l2.rst
index 2df3fb6229..5db8eba307 100644
--- a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/l2.rst
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/l2.rst
@@ -31,23 +31,12 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-64B-l2-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/l2 && grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*ndrdisc" 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*ndrdisc" tests/vpp/perf/l2/10ge2p1x520*
-
*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy L2 Ethernet Switching.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/l2?h=rls1804>`_.
+
NIC 40ge2p1xl710
~~~~~~~~~~~~~~~~
@@ -64,23 +53,12 @@ NIC 40ge2p1xl710
\label{fig:40ge2p1xl710-64B-l2-tsa-ndrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/l2 && grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*ndrdisc" 40ge2p1xl710*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*ndrdisc" tests/vpp/perf/l2/40ge2p1xl710*
-
*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
NDR Throughput for Phy-to-Phy L2 Ethernet Switching.*
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/l2?h=rls1804>`_.
+
PDR Throughput
--------------
@@ -104,19 +82,8 @@ NIC 10ge2p1x520
\label{fig:10ge2p1x520-64B-l2-tsa-pdrdisc}
\end{figure}
-CSIT source code for the test cases used for above plots can be found in CSIT
-git repository:
-
-.. only:: html
-
- .. program-output:: cd ../../../../../ && set +x && cd tests/vpp/perf/l2 && grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*pdrdisc" 10ge2p1x520*
- :shell:
-
-.. only:: latex
-
- .. code-block:: bash
-
- $ grep -E "64B-(1t1c|2t2c|4t4c)-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn|l2bdscale.*|l2dbscale.*)-(eth.*)*pdrdisc" tests/vpp/perf/l2/10ge2p1x520*
-
*Figure 3. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
PDR Throughput for Phy-to-Phy L2 Ethernet Switching.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/l2?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/srv6.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/srv6.rst
new file mode 100644
index 0000000000..4ece4d538e
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/srv6.rst
@@ -0,0 +1,68 @@
+SRv6
+====
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio. Input data
+used for the graphs comes from Phy-to-Phy 78B performance tests with VPP
+SRv6, including NDR throughput (zero packet loss) and
+PDR throughput (<0.5% packet loss).
+
+NDR Throughput
+--------------
+
+VPP NDR 78B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-78B-srv6-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-78B-srv6-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-78B-srv6-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 78B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520 network interface card. PDR
+measured for 0.5% packet loss ratio.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-78B-srv6-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-78B-srv6-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-78B-srv6-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy SRv6.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/srv6?h=rls1804>`_.
diff --git a/docs/report/vpp_performance_tests/throughput_speedup_multi_core/vm_vhost.rst b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/vm_vhost.rst
new file mode 100644
index 0000000000..9bd49b42ab
--- /dev/null
+++ b/docs/report/vpp_performance_tests/throughput_speedup_multi_core/vm_vhost.rst
@@ -0,0 +1,229 @@
+VM vhost Connections
+====================
+
+Following sections include Throughput Speedup Analysis for VPP multi-
+core multi-thread configurations with no Hyper-Threading, specifically
+for tested 2t2c (2threads, 2cores) and 4t4c scenarios. 1t1c throughput
+results are used as a reference for reported speedup ratio. Input data
+used for the graphs comes from Phy-to-Phy 64B performance tests with
+VM vhost-user, including NDR throughput (zero packet loss) and
+PDR throughput (<0.5% packet loss).
+
+NDR Throughput
+--------------
+
+VPP NDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520, 10ge2p1x710 and 40ge2p1xl710 network interface cards.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1a. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc}
+ \label{fig:10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 1b. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+NIC 10ge2p1x710
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc}
+ \label{fig:10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 2. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+NIC 40ge2p1xl710
+~~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc}
+ \label{fig:40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 3a. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc}
+ \label{fig:40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc}
+ \end{figure}
+
+*Figure 3b. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+NDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+PDR Throughput
+--------------
+
+VPP PDR 64B packet throughput speedup ratio is presented in the graphs
+below for 10ge2p1x520, 10ge2p1x710 and 40ge2p1xl710 network interface cards.
+
+NIC 10ge2p1x520
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 4a. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc}
+ \label{fig:10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 4b. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+NIC 10ge2p1x710
+~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc}
+ \label{fig:10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 5. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+NIC 40ge2p1xl710
+~~~~~~~~~~~~~~~~
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc}
+ \label{fig:40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 6a. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
+
+.. raw:: html
+
+ <iframe width="700" height="1000" frameborder="0" scrolling="no" src="../../_static/vpp/40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc.html"></iframe>
+
+.. raw:: latex
+
+ \begin{figure}[H]
+ \centering
+ \graphicspath{{../_build/_static/vpp/}}
+ \includegraphics[clip, trim=0cm 8cm 5cm 0cm, width=0.70\textwidth]{40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc}
+ \label{fig:40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc}
+ \end{figure}
+
+*Figure 6b. Throughput Speedup Analysis - Multi-Core Speedup Ratio - Normalized
+PDR Throughput for Phy-to-Phy VM vhost-user selected TCs.*
+
+CSIT source code for the test cases used for above plots can be found in
+`CSIT git repository <https://git.fd.io/csit/tree/tests/vpp/perf/vm_vhost?h=rls1804>`_.
diff --git a/docs/report/vpp_unit_tests/documentation.rst b/docs/report/vpp_unit_tests/documentation.rst
deleted file mode 100644
index 304db1121b..0000000000
--- a/docs/report/vpp_unit_tests/documentation.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Documentation
-=============
-
-For complete description of the VPP Test Framework including anatomy of a test
-case and detailed documentation of existing VPP unit test cases please refer
-to the `VPP test framework documentation`_.
diff --git a/docs/report/vpp_unit_tests/index.rst b/docs/report/vpp_unit_tests/index.rst
deleted file mode 100644
index 525eb1230d..0000000000
--- a/docs/report/vpp_unit_tests/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-VPP Unit Tests
-==============
-
-.. toctree::
-
- overview
- documentation
-
diff --git a/docs/report/vpp_unit_tests/overview.rst b/docs/report/vpp_unit_tests/overview.rst
deleted file mode 100644
index 9472680aa5..0000000000
--- a/docs/report/vpp_unit_tests/overview.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-Overview
-========
-
-.. note::
-
- This section includes an abbreviated version of the VPP Test Framework
- overview maintained within the VPP project. Complete overview can be found
- in `VPP test framework documentation`_.
-
-VPP Unit Test Framework
------------------------
-
-VPP Test Framework is used to ease writing, running and debugging unit tests
-for the VPP. It is based on python as a high level language to allow rapid
-test development. scapy\_ is used as a tool for creating and dissecting
-packets.
-
-VPP Test Framework does not send any packets to VPP directly. Traffic is
-instead injected using VPP packet-generator interfaces. Packets are written
-into a temporary .pcap file, which is then read by the VPP code with packets
-getting injected into the VPP processing nodes.
-
-Similarly, VPP does not send any packets to VPP Test Framework directly.
-Instead, VPP packet capture feature is used to capture and write packets to a
-temporary .pcap file, which is then read and analyzed by the VPP Test
-Framework.
-
-For complete description of the VPP Test Framework including anatomy of a test
-case and detailed documentation of existing VPP unit test cases please refer
-to the `VPP test framework documentation`_
-
-Unit Tests Coverage
--------------------
-
-Following VPP functional test areas are covered in VPP unit test code included
-in VPP rls1710 with results listed in this report:
-
-- ACL Security - stateful and stateless security-groups access-control-lists.
-- APIs - VAPI, VOM, PAPI, JVPP.
-- ARP - ARP, proxy ARP, static arp.
-- BFD - API, Authentication, Authentication Change, CLI.
-- BFD IPv4 - sessions operation.
-- BFD IPv6 - sessions operation.
-- BIER - Bit Indexed Explicit Replication.
-- Classifier - classification with IP ACL, MAC ACL, IP PBR.
-- Container Integration - IPv4, IPv6 local-spoof connectivity tests.
-- CRUD Loopback - create, read, update, delete Loopback interfaces.
-- DHCP - DHCPv4/v6 Client and Proxy.
-- Distributed Virtual Router.
-- DS-Lite Softwire - softwire termination.
-- FIB - baseline and scale tests.
-- Flowprobe.
-- Geneve Tunnels.
-- GRE Tunnels - GRE IPv4/IPv6 tunnel, L2, VRF tests.
-- GTPU Tunnels - baseline GTPU tests.
-- IP Multicast Routing - IPv4/IPv6 multicast replication, connected source check.
-- IPSec - baseline IPSec sanity tests.
-- IPv4 FIB CRUD - add/update/delete IPv4 routes.
-- IPv4 Routing.
-- IP4 VRF Multi-instance - create, read, update, delete and verify IPv4 VRFs.
-- IPv6 Routing - baseline FIB operations, NS/RS exception handling.
-- IP6 VRF Multi-instance - create, read, update, delete and verify IPv6 VRFs.
-- IRB Integrated Routing-Bridging.
-- Kube-proxy - data plane NAT tests.
-- L2 FIB CRUD - add/update/delete L2 MAC entries.
-- L2BD Multi-instance.
-- L2BD Switching - L2 Bridge-Domain baseline tests incl. single- and dual-loop.
-- L2XC Multi-instance - L2 cross-connect multi-instance tests.
-- L2XC Switching - L2 cross-connect baseline tests incl. single- and dual-loop.
-- LISP Tunnels - basic LISP tests.
-- Load Balancer - IP4 GRE4, IP4 GRE6, IP6 GRE4, IP6 GRE6.
-- MACIP Access Control - ingress access control for IPv4, IPv6 with L2BDP and IP routing.
-- MAP Softwires - softwire termination.
-- MFIB Multicast FIB.
-- MPLS Switching - MPLS baseline, prefix independent convergence for MPLS PE.
-- NAT44 - NAT44 tests, IPFIX logging, VRF awareness, deterministic CGNAT.
-- NAT64 - NAT64 static and dynamic translation tests.
-- P2P Ethernet Subinterface.
-- PPPoE Encapsulation.
-- SPAN Switch Port Analyzer - packet mirroring.
-- SRv6 Routing - Segment Routing IPv6 tests.
-- TCP/IP Stack - unit tests, builtin client/server transfers.
-- UDP Stack - unit tests.
-- VTR VLAN Tag Rewrites - VLAN tag rewrite tests.
-- VXLAN Tunnels - baseline VXLAN tests including multicast.
-- VXLAN-GPE Tunnels - baseline VXLAN-GPE tunneling including multicast.
-- Other Tests - ping, session, template verification, timer tests.
diff --git a/resources/tools/presentation/environment.py b/resources/tools/presentation/environment.py
index 05376e0e09..a2fa9a0d5b 100644
--- a/resources/tools/presentation/environment.py
+++ b/resources/tools/presentation/environment.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -51,35 +51,6 @@ class Environment(object):
"""
return self._env
- def _set_environment_variables(self):
- """Set environment variables.
- """
- logging.info("Setting the environment variables ...")
- # logging.debug("Environment variables before:\n{}".format(os.environ))
-
- count = 1
-
- for var, value in self._env["configuration"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- for var, value in self._env["paths"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- for var, value in self._env["urls"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- # logging.debug("Environment variables after:\n{}".format(os.environ))
- logging.info("Done.")
-
def _make_dirs(self):
"""Create the directories specified in the 'make-dirs' part of
'environment' section in the specification file.
@@ -122,7 +93,6 @@ class Environment(object):
"""Set the environment.
"""
- self._set_environment_variables()
self._make_dirs()
@@ -147,9 +117,10 @@ def clean_environment(env):
if os.path.isdir(dir_to_remove):
try:
shutil.rmtree(dir_to_remove)
- except OSError:
- raise PresentationError("Cannot remove the directory '{}'".
- format(dir_to_remove))
+ except OSError as err:
+ logging.warning("Cannot remove the directory '{}'".
+ format(dir_to_remove))
+ logging.debug(str(err))
else:
logging.warning("The directory '{}' does not exist.".
format(dir_to_remove))
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 9195787b46..2c62e11a97 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -14,25 +14,28 @@
"""Generation of Continuous Performance Trending and Analysis.
"""
-import datetime
+import multiprocessing
+import os
import logging
import csv
import prettytable
import plotly.offline as ploff
import plotly.graph_objs as plgo
import plotly.exceptions as plerr
-import numpy as np
import pandas as pd
from collections import OrderedDict
-from utils import find_outliers, archive_input_data, execute_command
+from datetime import datetime
+
+from utils import split_outliers, archive_input_data, execute_command,\
+ classify_anomalies, Worker
# Command to build the html format of the report
HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
'-b html -E ' \
'-t html ' \
- '-D version="Generated on {date}" ' \
+ '-D version="{date}" ' \
'{working_dir} ' \
'{build_dir}/'
@@ -64,7 +67,7 @@ def generate_cpta(spec, data):
ret_code = _generate_all_charts(spec, data)
cmd = HTML_BUILDER.format(
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
execute_command(cmd)
@@ -84,196 +87,84 @@ def generate_cpta(spec, data):
return ret_code
-def _select_data(in_data, period, fill_missing=False, use_first=False):
- """Select the data from the full data set. The selection is done by picking
- the samples depending on the period: period = 1: All, period = 2: every
- second sample, period = 3: every third sample ...
-
- :param in_data: Full set of data.
- :param period: Sampling period.
- :param fill_missing: If the chosen sample is missing in the full set, its
- nearest neighbour is used.
- :param use_first: Use the first sample even though it is not chosen.
- :type in_data: OrderedDict
- :type period: int
- :type fill_missing: bool
- :type use_first: bool
- :returns: Reduced data.
- :rtype: OrderedDict
- """
-
- first_idx = min(in_data.keys())
- last_idx = max(in_data.keys())
-
- idx = last_idx
- data_dict = dict()
- if use_first:
- data_dict[first_idx] = in_data[first_idx]
- while idx >= first_idx:
- data = in_data.get(idx, None)
- if data is None:
- if fill_missing:
- threshold = int(round(idx - period / 2)) + 1 - period % 2
- idx_low = first_idx if threshold < first_idx else threshold
- threshold = int(round(idx + period / 2))
- idx_high = last_idx if threshold > last_idx else threshold
-
- flag_l = True
- flag_h = True
- idx_lst = list()
- inc = 1
- while flag_l or flag_h:
- if idx + inc > idx_high:
- flag_h = False
- else:
- idx_lst.append(idx + inc)
- if idx - inc < idx_low:
- flag_l = False
- else:
- idx_lst.append(idx - inc)
- inc += 1
-
- for i in idx_lst:
- if i in in_data.keys():
- data_dict[i] = in_data[i]
- break
- else:
- data_dict[idx] = data
- idx -= period
-
- return OrderedDict(sorted(data_dict.items(), key=lambda t: t[0]))
-
-
-def _evaluate_results(in_data, trimmed_data, window=10):
- """Evaluates if the sample value is regress, normal or progress compared to
- previous data within the window.
- We use the intervals defined as:
- - regress: less than median - 3 * stdev
- - normal: between median - 3 * stdev and median + 3 * stdev
- - progress: more than median + 3 * stdev
-
- :param in_data: Full data set.
- :param trimmed_data: Full data set without the outliers.
- :param window: Window size used to calculate moving median and moving stdev.
- :type in_data: pandas.Series
- :type trimmed_data: pandas.Series
- :type window: int
- :returns: Evaluated results.
- :rtype: list
- """
-
- if len(in_data) > 2:
- win_size = in_data.size if in_data.size < window else window
- results = [0.0, ] * win_size
- median = in_data.rolling(window=win_size).median()
- stdev_t = trimmed_data.rolling(window=win_size, min_periods=2).std()
- m_vals = median.values
- s_vals = stdev_t.values
- d_vals = in_data.values
- for day in range(win_size, in_data.size):
- if np.isnan(m_vals[day - 1]) or np.isnan(s_vals[day - 1]):
- results.append(0.0)
- elif d_vals[day] < (m_vals[day - 1] - 3 * s_vals[day - 1]):
- results.append(0.33)
- elif (m_vals[day - 1] - 3 * s_vals[day - 1]) <= d_vals[day] <= \
- (m_vals[day - 1] + 3 * s_vals[day - 1]):
- results.append(0.66)
- else:
- results.append(1.0)
- else:
- results = [0.0, ]
- try:
- median = np.median(in_data)
- stdev = np.std(in_data)
- if in_data.values[-1] < (median - 3 * stdev):
- results.append(0.33)
- elif (median - 3 * stdev) <= in_data.values[-1] <= (
- median + 3 * stdev):
- results.append(0.66)
- else:
- results.append(1.0)
- except TypeError:
- results.append(None)
- return results
-
-
-def _generate_trending_traces(in_data, period, moving_win_size=10,
- fill_missing=True, use_first=False,
- show_moving_median=True, name="", color=""):
+def _generate_trending_traces(in_data, build_info, moving_win_size=10,
+ show_trend_line=True, name="", color=""):
"""Generate the trending traces:
- samples,
- - moving median (trending plot)
+ - trimmed moving median (trending line)
- outliers, regress, progress
:param in_data: Full data set.
- :param period: Sampling period.
+ :param build_info: Information about the builds.
:param moving_win_size: Window size.
- :param fill_missing: If the chosen sample is missing in the full set, its
- nearest neighbour is used.
- :param use_first: Use the first sample even though it is not chosen.
- :param show_moving_median: Show moving median (trending plot).
+ :param show_trend_line: Show moving median (trending plot).
:param name: Name of the plot
:param color: Name of the color for the plot.
:type in_data: OrderedDict
- :type period: int
+ :type build_info: dict
:type moving_win_size: int
- :type fill_missing: bool
- :type use_first: bool
- :type show_moving_median: bool
+ :type show_trend_line: bool
:type name: str
:type color: str
- :returns: Generated traces (list) and the evaluated result (float).
+ :returns: Generated traces (list) and the evaluated result.
:rtype: tuple(traces, result)
"""
- if period > 1:
- in_data = _select_data(in_data, period,
- fill_missing=fill_missing,
- use_first=use_first)
+ data_x = list(in_data.keys())
+ data_y = list(in_data.values())
- data_x = [key for key in in_data.keys()]
- data_y = [val for val in in_data.values()]
- data_pd = pd.Series(data_y, index=data_x)
+ hover_text = list()
+ xaxis = list()
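+    # For every build number on the x-axis, create a hover label with the
+    # VPP and CSIT build references, and convert the build date string from
+    # build_info (year/month/day/hour/minute fields sliced by position)
+    # into a datetime object used as the x-axis value.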
+ for idx in data_x:
+ hover_text.append("vpp-ref: {0}<br>csit-ref: mrr-daily-build-{1}".
+ format(build_info[str(idx)][1].rsplit('~', 1)[0],
+ idx))
+ date = build_info[str(idx)][0]
+ xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
+ int(date[9:11]), int(date[12:])))
- t_data, outliers = find_outliers(data_pd)
+ data_pd = pd.Series(data_y, index=xaxis)
- results = _evaluate_results(data_pd, t_data, window=moving_win_size)
+ t_data, outliers = split_outliers(data_pd, outlier_const=1.5,
+ window=moving_win_size)
+ anomaly_classification = classify_anomalies(t_data, window=moving_win_size)
anomalies = pd.Series()
- anomalies_res = list()
- for idx, item in enumerate(in_data.items()):
- item_pd = pd.Series([item[1], ], index=[item[0], ])
- if item[0] in outliers.keys():
- anomalies = anomalies.append(item_pd)
- anomalies_res.append(0.0)
- elif results[idx] in (0.33, 1.0):
- anomalies = anomalies.append(item_pd)
- anomalies_res.append(results[idx])
- anomalies_res.extend([0.0, 0.33, 0.66, 1.0])
+ anomalies_colors = list()
+ anomaly_color = {
+ "outlier": 0.0,
+ "regression": 0.33,
+ "normal": 0.66,
+ "progression": 1.0
+ }
+ if anomaly_classification:
+ for idx, item in enumerate(data_pd.items()):
+ if anomaly_classification[idx] in \
+ ("outlier", "regression", "progression"):
+ anomalies = anomalies.append(pd.Series([item[1], ],
+ index=[item[0], ]))
+ anomalies_colors.append(
+ anomaly_color[anomaly_classification[idx]])
+ anomalies_colors.extend([0.0, 0.33, 0.66, 1.0])
# Create traces
- color_scale = [[0.00, "grey"],
- [0.25, "grey"],
- [0.25, "red"],
- [0.50, "red"],
- [0.50, "white"],
- [0.75, "white"],
- [0.75, "green"],
- [1.00, "green"]]
trace_samples = plgo.Scatter(
- x=data_x,
+ x=xaxis,
y=data_y,
mode='markers',
line={
"width": 1
},
+ legendgroup=name,
name="{name}-thput".format(name=name),
marker={
"size": 5,
"color": color,
"symbol": "circle",
},
+ text=hover_text,
+ hoverinfo="x+y+text+name"
)
traces = [trace_samples, ]
@@ -282,14 +173,21 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
y=anomalies.values,
mode='markers',
hoverinfo="none",
- showlegend=False,
+ showlegend=True,
legendgroup=name,
- name="{name}: outliers".format(name=name),
+ name="{name}-anomalies".format(name=name),
marker={
"size": 15,
"symbol": "circle-open",
- "color": anomalies_res,
- "colorscale": color_scale,
+ "color": anomalies_colors,
+ "colorscale": [[0.00, "grey"],
+ [0.25, "grey"],
+ [0.25, "red"],
+ [0.50, "red"],
+ [0.50, "white"],
+ [0.75, "white"],
+ [0.75, "green"],
+ [1.00, "green"]],
"showscale": True,
"line": {
"width": 2
@@ -314,43 +212,24 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
)
traces.append(trace_anomalies)
- if show_moving_median:
- data_mean_y = pd.Series(data_y).rolling(
- window=moving_win_size, min_periods=2).median()
- trace_median = plgo.Scatter(
- x=data_x,
- y=data_mean_y,
+ if show_trend_line:
+ data_trend = t_data.rolling(window=moving_win_size,
+ min_periods=2).median()
+ trace_trend = plgo.Scatter(
+ x=data_trend.keys(),
+ y=data_trend.tolist(),
mode='lines',
line={
"shape": "spline",
"width": 1,
"color": color,
},
+ legendgroup=name,
name='{name}-trend'.format(name=name)
)
- traces.append(trace_median)
-
- return traces, results[-1]
+ traces.append(trace_trend)
-
-def _generate_chart(traces, layout, file_name):
- """Generates the whole chart using pre-generated traces.
-
- :param traces: Traces for the chart.
- :param layout: Layout of the chart.
- :param file_name: File name for the generated chart.
- :type traces: list
- :type layout: dict
- :type file_name: str
- """
-
- # Create plot
- logging.info(" Writing the file '{0}' ...".format(file_name))
- plpl = plgo.Figure(data=traces, layout=layout)
- try:
- ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
- except plerr.PlotlyEmptyDataError:
- logging.warning(" No data for the plot. Skipped.")
+ return traces, anomaly_classification[-1]
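The x-axis timestamps above are sliced out of the "generated" string stored in build_info, which has the form "YYYYMMDD HH:MM". A minimal sketch of that conversion with an illustrative value (not taken from a real build):

    from datetime import datetime

    date = "20180510 13:21"   # hypothetical build_info[...][0] value
    timestamp = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                         int(date[9:11]), int(date[12:]))
    # -> datetime(2018, 5, 10, 13, 21)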
def _generate_all_charts(spec, input_data):
@@ -362,50 +241,38 @@ def _generate_all_charts(spec, input_data):
:type input_data: InputData
"""
- job_name = spec.cpta["data"].keys()[0]
+ def _generate_chart(_, data_q, graph):
+ """Generates the chart.
+ """
- builds_lst = list()
- for build in spec.input["builds"][job_name]:
- status = build["status"]
- if status != "failed" and status != "not found":
- builds_lst.append(str(build["build"]))
- print(builds_lst)
+ logs = list()
- # Get "build ID": "date" dict:
- build_dates = dict()
- for build in builds_lst:
- try:
- build_dates[build] = \
- input_data.metadata(job_name, build)["generated"][:14]
- except KeyError:
- pass
+ logging.info(" Generating the chart '{0}' ...".
+ format(graph.get("title", "")))
+ logs.append(("INFO", " Generating the chart '{0}' ...".
+ format(graph.get("title", ""))))
- # Create the header:
- csv_table = list()
- header = "Build Number:," + ",".join(builds_lst) + '\n'
- csv_table.append(header)
- header = "Build Date:," + ",".join(build_dates.values()) + '\n'
- csv_table.append(header)
+ job_name = spec.cpta["data"].keys()[0]
- results = list()
- for chart in spec.cpta["plots"]:
- logging.info(" Generating the chart '{0}' ...".
- format(chart.get("title", "")))
+ csv_tbl = list()
+ res = list()
# Transform the data
- data = input_data.filter_data(chart, continue_on_error=True)
+ logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
+ format(graph.get("type", ""), graph.get("title", ""))))
+ data = input_data.filter_data(graph, continue_on_error=True)
if data is None:
logging.error("No data.")
return
chart_data = dict()
for job in data:
- for idx, build in job.items():
- for test_name, test in build.items():
+ for index, bld in job.items():
+ for test_name, test in bld.items():
if chart_data.get(test_name, None) is None:
chart_data[test_name] = OrderedDict()
try:
- chart_data[test_name][int(idx)] = \
+ chart_data[test_name][int(index)] = \
test["result"]["throughput"]
except (KeyError, TypeError):
pass
@@ -413,46 +280,130 @@ def _generate_all_charts(spec, input_data):
# Add items to the csv table:
for tst_name, tst_data in chart_data.items():
tst_lst = list()
- for build in builds_lst:
- item = tst_data.get(int(build), '')
- tst_lst.append(str(item) if item else '')
- csv_table.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
-
- for period in chart["periods"]:
- # Generate traces:
- traces = list()
- win_size = 10 if period == 1 else 5 if period < 20 else 3
- idx = 0
- for test_name, test_data in chart_data.items():
- if not test_data:
- logging.warning("No data for the test '{0}'".
- format(test_name))
- continue
- test_name = test_name.split('.')[-1]
- trace, result = _generate_trending_traces(
- test_data,
- period=period,
- moving_win_size=win_size,
- fill_missing=True,
- use_first=False,
- name='-'.join(test_name.split('-')[3:-1]),
- color=COLORS[idx])
- traces.extend(trace)
- results.append(result)
- idx += 1
-
+ for bld in builds_lst:
+ itm = tst_data.get(int(bld), '')
+ tst_lst.append(str(itm))
+ csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
+ # Generate traces:
+ traces = list()
+ win_size = 14
+ index = 0
+ for test_name, test_data in chart_data.items():
+ if not test_data:
+ logs.append(("WARNING", "No data for the test '{0}'".
+ format(test_name)))
+ continue
+ test_name = test_name.split('.')[-1]
+ trace, rslt = _generate_trending_traces(
+ test_data,
+ build_info=build_info,
+ moving_win_size=win_size,
+ name='-'.join(test_name.split('-')[3:-1]),
+ color=COLORS[index])
+ traces.extend(trace)
+ res.append(rslt)
+ index += 1
+
+ if traces:
# Generate the chart:
- chart["layout"]["xaxis"]["title"] = \
- chart["layout"]["xaxis"]["title"].format(job=job_name)
- _generate_chart(traces,
- chart["layout"],
- file_name="{0}-{1}-{2}{3}".format(
- spec.cpta["output-file"],
- chart["output-file-name"],
- period,
- spec.cpta["output-file-type"]))
-
- logging.info(" Done.")
+ graph["layout"]["xaxis"]["title"] = \
+ graph["layout"]["xaxis"]["title"].format(job=job_name)
+ name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
+ graph["output-file-name"],
+ spec.cpta["output-file-type"])
+
+ logs.append(("INFO", " Writing the file '{0}' ...".
+ format(name_file)))
+ plpl = plgo.Figure(data=traces, layout=graph["layout"])
+ try:
+ ploff.plot(plpl, show_link=False, auto_open=False,
+ filename=name_file)
+ except plerr.PlotlyEmptyDataError:
+ logs.append(("WARNING", "No data for the plot. Skipped."))
+
+ data_out = {
+ "csv_table": csv_tbl,
+ "results": res,
+ "logs": logs
+ }
+ data_q.put(data_out)
+
+ job_name = spec.cpta["data"].keys()[0]
+
+ builds_lst = list()
+ for build in spec.input["builds"][job_name]:
+ status = build["status"]
+ if status != "failed" and status != "not found":
+ builds_lst.append(str(build["build"]))
+
+ # Get "build ID": "date" dict:
+ build_info = OrderedDict()
+ for build in builds_lst:
+ try:
+ build_info[build] = (
+ input_data.metadata(job_name, build)["generated"][:14],
+ input_data.metadata(job_name, build)["version"]
+ )
+ except KeyError:
+ build_info[build] = ("", "")
+
+ work_queue = multiprocessing.JoinableQueue()
+ manager = multiprocessing.Manager()
+ data_queue = manager.Queue()
+ cpus = multiprocessing.cpu_count()
+
+ workers = list()
+ for cpu in range(cpus):
+ worker = Worker(work_queue,
+ data_queue,
+ _generate_chart)
+ worker.daemon = True
+ worker.start()
+ workers.append(worker)
+ os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+ format(cpu, worker.pid))
+
+ for chart in spec.cpta["plots"]:
+ work_queue.put((chart, ))
+ work_queue.join()
+
+ anomaly_classifications = list()
+
+ # Create the header:
+ csv_table = list()
+ header = "Build Number:," + ",".join(builds_lst) + '\n'
+ csv_table.append(header)
+ build_dates = [x[0] for x in build_info.values()]
+ header = "Build Date:," + ",".join(build_dates) + '\n'
+ csv_table.append(header)
+ vpp_versions = [x[1] for x in build_info.values()]
+ header = "VPP Version:," + ",".join(vpp_versions) + '\n'
+ csv_table.append(header)
+
+ while not data_queue.empty():
+ result = data_queue.get()
+
+ anomaly_classifications.extend(result["results"])
+ csv_table.extend(result["csv_table"])
+
+ for item in result["logs"]:
+ if item[0] == "INFO":
+ logging.info(item[1])
+ elif item[0] == "ERROR":
+ logging.error(item[1])
+ elif item[0] == "DEBUG":
+ logging.debug(item[1])
+ elif item[0] == "CRITICAL":
+ logging.critical(item[1])
+ elif item[0] == "WARNING":
+ logging.warning(item[1])
+
+ del data_queue
+
+ # Terminate all workers
+ for worker in workers:
+ worker.terminate()
+ worker.join()
# Write the tables:
file_name = spec.cpta["output-file"] + "-trending"
@@ -473,24 +424,27 @@ def _generate_all_charts(spec, input_data):
row[idx] = str(round(float(item) / 1000000, 2))
except ValueError:
pass
- txt_table.add_row(row)
+ try:
+ txt_table.add_row(row)
+ except Exception as err:
+ logging.warning("Error occurred while generating TXT table:"
+ "\n{0}".format(err))
line_nr += 1
txt_table.align["Build Number:"] = "l"
with open("{0}.txt".format(file_name), "w") as txt_file:
txt_file.write(str(txt_table))
# Evaluate result:
- result = "PASS"
- for item in results:
- if item is None:
- result = "FAIL"
- break
- if item == 0.66 and result == "PASS":
- result = "PASS"
- elif item == 0.33 or item == 0.0:
- result = "FAIL"
-
- logging.info("Partial results: {0}".format(results))
+ if anomaly_classifications:
+ result = "PASS"
+ for classification in anomaly_classifications:
+ if classification == "regression" or classification == "outlier":
+ result = "FAIL"
+ break
+ else:
+ result = "FAIL"
+
+ logging.info("Partial results: {0}".format(anomaly_classifications))
logging.info("Result: {0}".format(result))
return result
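Both _generate_all_charts above and InputData.download_and_parse_data later in this change fan work out to one process per CPU: a JoinableQueue feeds the work items, a Manager().Queue() carries results and buffered (severity, message) log tuples back to the parent, which replays them through the logging module before terminating the workers. The utils.Worker class itself is not part of this diff; the following is only a rough sketch of the same pattern with bare multiprocessing primitives (all names illustrative):

    import multiprocessing

    def _worker(work_queue, data_queue):
        # Consume work items until the parent terminates this process.
        while True:
            item = work_queue.get()
            try:
                data_queue.put({"item": item,
                                "logs": [("INFO", "processed {0}".format(item))]})
            finally:
                work_queue.task_done()

    if __name__ == "__main__":
        work_queue = multiprocessing.JoinableQueue()
        data_queue = multiprocessing.Manager().Queue()
        workers = []
        for _ in range(multiprocessing.cpu_count()):
            worker = multiprocessing.Process(target=_worker,
                                             args=(work_queue, data_queue))
            worker.daemon = True
            worker.start()
            workers.append(worker)
        for item in range(10):
            work_queue.put(item)
        work_queue.join()              # blocks until every item is task_done()
        while not data_queue.empty():
            print(data_queue.get()["logs"])
        for worker in workers:         # the workers loop forever; stop them explicitly
            worker.terminate()
            worker.join()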
diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index 1cd1b6dfbb..e717815cd0 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -141,9 +141,13 @@ def file_merged_test_results(file_spec, input_data):
logging.info(" Writing file '{0}'".format(file_name))
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(file_spec.get("type", ""), file_spec.get("title", "")))
tests = input_data.filter_data(file_spec)
tests = input_data.merge_data(tests)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(file_spec.get("type", ""), file_spec.get("title", "")))
suites = input_data.filter_data(file_spec, data_set="suites")
suites = input_data.merge_data(suites)
suites.sort_index(inplace=True)
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index b7fd420aa2..6faf4c3935 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -59,6 +59,8 @@ def plot_performance_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -129,6 +131,8 @@ def plot_latency_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -236,6 +240,8 @@ def plot_throughput_speedup_analysis(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -335,6 +341,8 @@ def plot_http_server_performance_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
diff --git a/resources/tools/presentation/generator_report.py b/resources/tools/presentation/generator_report.py
index 6819f350b6..07103dbb1f 100644
--- a/resources/tools/presentation/generator_report.py
+++ b/resources/tools/presentation/generator_report.py
@@ -103,7 +103,7 @@ def generate_html_report(release, spec, versions):
cmd = HTML_BUILDER.format(
release=release,
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
execute_command(cmd)
@@ -148,7 +148,7 @@ def generate_pdf_report(release, spec, versions):
build_dir = spec.environment["paths"]["DIR[BUILD,LATEX]"]
cmd = PDF_BUILDER.format(
release=release,
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=build_dir)
execute_command(cmd)
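The report timestamp switches from a date-only string to a UTC date-and-time string; with an illustrative value the two formats compare as follows:

    import datetime

    ts = datetime.datetime(2018, 5, 29, 10, 45)
    ts.strftime('%d-%b-%Y')             # '29-May-2018' (old format)
    ts.strftime('%m/%d/%Y %H:%M UTC')   # '05/29/2018 10:45 UTC' (new format)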
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index a667fffb16..8791ae5804 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,11 +18,16 @@
import logging
import csv
import prettytable
+import pandas as pd
from string import replace
+from collections import OrderedDict
+from numpy import nan, isnan
+from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers
+from utils import mean, stdev, relative_change, remove_outliers,\
+ split_outliers, classify_anomalies
def generate_tables(spec, data):
@@ -58,6 +63,8 @@ def table_details(table, input_data):
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
@@ -124,10 +131,14 @@ def table_merged_details(table, input_data):
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
data = input_data.merge_data(data)
data.sort_index(inplace=True)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
suites = input_data.filter_data(table, data_set="suites")
suites = input_data.merge_data(suites)
@@ -221,6 +232,8 @@ def table_performance_improvements(table, input_data):
return None
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
@@ -352,16 +365,26 @@ def table_performance_comparison(table, input_data):
format(table.get("title", "")))
# Transform the data
- data = input_data.filter_data(table)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
# Prepare the header of the tables
try:
- header = ["Test case",
- "{0} Throughput [Mpps]".format(table["reference"]["title"]),
- "{0} stdev [Mpps]".format(table["reference"]["title"]),
- "{0} Throughput [Mpps]".format(table["compare"]["title"]),
- "{0} stdev [Mpps]".format(table["compare"]["title"]),
- "Change [%]"]
+ header = ["Test case", ]
+
+ history = table.get("history", None)
+ if history:
+ for item in history:
+ header.extend(
+ ["{0} Throughput [Mpps]".format(item["title"]),
+ "{0} Stdev [Mpps]".format(item["title"])])
+ header.extend(
+ ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+ "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+ "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+ "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+ "Change [%]"])
header_str = ",".join(header) + "\n"
except (AttributeError, KeyError) as err:
logging.error("The model is invalid, missing parameter: {0}".
@@ -396,27 +419,68 @@ def table_performance_comparison(table, input_data):
pass
except TypeError:
tbl_dict.pop(tst_name, None)
+ if history:
+ for item in history:
+ for job, builds in item["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ continue
+ if tbl_dict[tst_name].get("history", None) is None:
+ tbl_dict[tst_name]["history"] = OrderedDict()
+ if tbl_dict[tst_name]["history"].get(item["title"],
+ None) is None:
+ tbl_dict[tst_name]["history"][item["title"]] = \
+ list()
+ try:
+ tbl_dict[tst_name]["history"][item["title"]].\
+ append(tst_data["throughput"]["value"])
+ except (TypeError, KeyError):
+ pass
tbl_lst = list()
for tst_name in tbl_dict.keys():
item = [tbl_dict[tst_name]["name"], ]
+ if history:
+ if tbl_dict[tst_name].get("history", None) is not None:
+ for hist_data in tbl_dict[tst_name]["history"].values():
+ if hist_data:
+ data_t = remove_outliers(
+ hist_data, outlier_const=table["outlier-const"])
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
- if item[1] is not None and item[3] is not None:
- item.append(int(relative_change(float(item[1]), float(item[3]))))
- if len(item) == 6:
+ if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+ item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+ if len(item) == len(header):
tbl_lst.append(item)
# Sort the table according to the relative change
@@ -438,7 +502,7 @@ def table_performance_comparison(table, input_data):
table["output-file-ext"])
]
for file_name in tbl_names:
- logging.info(" Writing file: '{}'".format(file_name))
+ logging.info(" Writing file: '{0}'".format(file_name))
with open(file_name, "w") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
@@ -459,7 +523,7 @@ def table_performance_comparison(table, input_data):
for i, txt_name in enumerate(tbl_names_txt):
txt_table = None
- logging.info(" Writing file: '{}'".format(txt_name))
+ logging.info(" Writing file: '{0}'".format(txt_name))
with open(tbl_names[i], 'rb') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
for row in csv_content:
@@ -481,7 +545,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
@@ -491,7 +555,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
@@ -508,7 +572,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
@@ -518,10 +582,424 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
if i == table["nr-of-tests-shown"]:
break
out_file.write(line)
+
+
+def table_performance_comparison_mrr(table, input_data):
+ """Generate the table(s) with algorithm: table_performance_comparison_mrr
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ try:
+ header = ["Test case",
+ "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+ "{0} stdev [Mpps]".format(table["reference"]["title"]),
+ "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+ "{0} stdev [Mpps]".format(table["compare"]["title"]),
+ "Change [%]"]
+ header_str = ",".join(header) + "\n"
+ except (AttributeError, KeyError) as err:
+ logging.error("The model is invalid, missing parameter: {0}".
+ format(err))
+ return
+
+ # Prepare data to the table:
+ tbl_dict = dict()
+ for job, builds in table["reference"]["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "ref-data": list(),
+ "cmp-data": list()}
+ try:
+ tbl_dict[tst_name]["ref-data"].\
+ append(tst_data["result"]["throughput"])
+ except TypeError:
+ pass # No data in output.xml for this test
+
+ for job, builds in table["compare"]["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ try:
+ tbl_dict[tst_name]["cmp-data"].\
+ append(tst_data["result"]["throughput"])
+ except KeyError:
+ pass
+ except TypeError:
+ tbl_dict.pop(tst_name, None)
+
+ tbl_lst = list()
+ for tst_name in tbl_dict.keys():
+ item = [tbl_dict[tst_name]["name"], ]
+ if tbl_dict[tst_name]["ref-data"]:
+ data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ if tbl_dict[tst_name]["cmp-data"]:
+ data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ if item[1] is not None and item[3] is not None and item[1] != 0:
+ item.append(int(relative_change(float(item[1]), float(item[3]))))
+ if len(item) == 6:
+ tbl_lst.append(item)
+
+ # Sort the table according to the relative change
+ tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+ # Generate tables:
+ # All tests in csv:
+ tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
+ table["output-file-ext"]),
+ "{0}-2t2c-full{1}".format(table["output-file"],
+ table["output-file-ext"]),
+ "{0}-4t4c-full{1}".format(table["output-file"],
+ table["output-file-ext"])
+ ]
+ for file_name in tbl_names:
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_lst:
+ if file_name.split("-")[-2] in test[0]: # cores
+ test[0] = "-".join(test[0].split("-")[:-1])
+ file_handler.write(",".join([str(item) for item in test]) +
+ "\n")
+
+ # All tests in txt:
+ tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
+ "{0}-2t2c-full.txt".format(table["output-file"]),
+ "{0}-4t4c-full.txt".format(table["output-file"])
+ ]
+
+ for i, txt_name in enumerate(tbl_names_txt):
+ txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_name))
+ with open(tbl_names[i], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ with open(txt_name, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
+def table_performance_trending_dashboard(table, input_data):
+ """Generate the table(s) with algorithm: table_performance_comparison
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ header = ["Test Case",
+ "Trend [Mpps]",
+ "Short-Term Change [%]",
+ "Long-Term Change [%]",
+ "Regressions [#]",
+ "Progressions [#]",
+ "Outliers [#]"
+ ]
+ header_str = ",".join(header) + "\n"
+
+ # Prepare data to the table:
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": OrderedDict()}
+ try:
+ tbl_dict[tst_name]["data"][str(build)] = \
+ tst_data["result"]["throughput"]
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
+ for tst_name in tbl_dict.keys():
+ if len(tbl_dict[tst_name]["data"]) < 3:
+ continue
+
+ pd_data = pd.Series(tbl_dict[tst_name]["data"])
+ data_t, _ = split_outliers(pd_data, outlier_const=1.5,
+ window=table["window"])
+ last_key = data_t.keys()[-1]
+ win_size = min(data_t.size, table["window"])
+ win_first_idx = data_t.size - win_size
+ key_14 = data_t.keys()[win_first_idx]
+ long_win_size = min(data_t.size, table["long-trend-window"])
+ median_t = data_t.rolling(window=win_size, min_periods=2).median()
+ median_first_idx = median_t.size - long_win_size
+ try:
+ max_median = max(
+ [x for x in median_t.values[median_first_idx:-win_size]
+ if not isnan(x)])
+ except ValueError:
+ max_median = nan
+ try:
+ last_median_t = median_t[last_key]
+ except KeyError:
+ last_median_t = nan
+ try:
+ median_t_14 = median_t[key_14]
+ except KeyError:
+ median_t_14 = nan
+
+ if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
+ rel_change_last = nan
+ else:
+ rel_change_last = round(
+ ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+
+ if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+ rel_change_long = nan
+ else:
+ rel_change_long = round(
+ ((last_median_t - max_median) / max_median) * 100, 2)
+
+ # Classification list:
+ classification_lst = classify_anomalies(data_t, window=14)
+
+ if classification_lst:
+ tbl_lst.append(
+ [tbl_dict[tst_name]["name"],
+ '-' if isnan(last_median_t) else
+ round(last_median_t / 1000000, 2),
+ '-' if isnan(rel_change_last) else rel_change_last,
+ '-' if isnan(rel_change_long) else rel_change_long,
+ classification_lst[win_first_idx:].count("regression"),
+ classification_lst[win_first_idx:].count("progression"),
+ classification_lst[win_first_idx:].count("outlier")])
+
+ tbl_lst.sort(key=lambda rel: rel[0])
+
+ tbl_sorted = list()
+ for nrr in range(table["window"], -1, -1):
+ tbl_reg = [item for item in tbl_lst if item[4] == nrr]
+ for nrp in range(table["window"], -1, -1):
+ tbl_pro = [item for item in tbl_reg if item[5] == nrp]
+ for nro in range(table["window"], -1, -1):
+ tbl_out = [item for item in tbl_pro if item[6] == nro]
+ tbl_out.sort(key=lambda rel: rel[2])
+ tbl_sorted.extend(tbl_out)
+
+ file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_sorted:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ with open(file_name, 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ with open(txt_file_name, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
+def table_performance_trending_dashboard_html(table, input_data):
+ """Generate the table(s) with algorithm:
+ table_performance_trending_dashboard_html specified in the specification
+ file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ try:
+ with open(table["input-file"], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ csv_lst = [item for item in csv_content]
+ except KeyError:
+ logging.warning("The input file is not defined.")
+ return
+ except csv.Error as err:
+ logging.warning("Not possible to process the file '{0}'.\n{1}".
+ format(table["input-file"], err))
+ return
+
+ # Table:
+ dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
+
+ # Table header:
+ tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
+ for idx, item in enumerate(csv_lst[0]):
+ alignment = "left" if idx == 0 else "center"
+ th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
+ th.text = item
+
+ # Rows:
+ colors = {"regression": ("#ffcccc", "#ff9999"),
+ "progression": ("#c6ecc6", "#9fdf9f"),
+ "outlier": ("#e6e6e6", "#cccccc"),
+ "normal": ("#e9f1fb", "#d4e4f7")}
+ for r_idx, row in enumerate(csv_lst[1:]):
+ if int(row[4]):
+ color = "regression"
+ elif int(row[5]):
+ color = "progression"
+ elif int(row[6]):
+ color = "outlier"
+ else:
+ color = "normal"
+ background = colors[color][r_idx % 2]
+ tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
+
+ # Columns:
+ for c_idx, item in enumerate(row):
+ alignment = "left" if c_idx == 0 else "center"
+ td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+ # Name:
+ url = "../trending/"
+ file_name = ""
+ anchor = "#"
+ feature = ""
+ if c_idx == 0:
+ if "memif" in item:
+ file_name = "container_memif.html"
+
+ elif "srv6" in item:
+ file_name = "srv6.html"
+
+ elif "vhost" in item:
+ if "l2xcbase" in item or "l2bdbasemaclrn" in item:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in item:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in item:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in item or "ethip4vxlan" in item:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in item or "ip4scale" in item:
+ file_name = "ip4.html"
+ if "iacl" in item or "snat" in item or "cop" in item:
+ feature = "-features"
+
+ elif "ip6base" in item or "ip6scale" in item:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in item or "l2xcscale" in item \
+ or "l2bdbasemaclrn" in item or "l2bdscale" in item \
+ or "l2dbbasemaclrn" in item or "l2dbscale" in item:
+ file_name = "l2.html"
+ if "iacl" in item:
+ feature = "-features"
+
+ if "x520" in item:
+ anchor += "x520-"
+ elif "x710" in item:
+ anchor += "x710-"
+ elif "xl710" in item:
+ anchor += "xl710-"
+
+ if "64b" in item:
+ anchor += "64b-"
+ elif "78b" in item:
+ anchor += "78b-"
+ elif "imix" in item:
+ anchor += "imix-"
+ elif "9000b" in item:
+ anchor += "9000b-"
+ elif "1518" in item:
+ anchor += "1518b-"
+
+ if "1t1c" in item:
+ anchor += "1t1c"
+ elif "2t2c" in item:
+ anchor += "2t2c"
+ elif "4t4c" in item:
+ anchor += "4t4c"
+
+ url = url + file_name + anchor + feature
+
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+
+ if c_idx > 0:
+ td.text = item
+
+ try:
+ with open(table["output-file"], 'w') as html_file:
+ logging.info(" Writing file: '{0}'".
+ format(table["output-file"]))
+ html_file.write(".. raw:: html\n\n\t")
+ html_file.write(ET.tostring(dashboard))
+ html_file.write("\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning("The output file is not defined.")
+ return
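The new dashboard and MRR tables rely on split_outliers and classify_anomalies imported from utils.py, which are not shown in this diff. Judging from the removed _evaluate_results in generator_CPTA.py above, the assumed rule is: strip outliers first (controlled by outlier_const), then compare each sample against a rolling median and standard deviation, flagging values below median - 3 * stdev as regressions and values above median + 3 * stdev as progressions. A rough single-point sketch of that assumed rule (an approximation, not the actual utils.py code):

    import numpy as np

    def classify_point(window_values, value):
        """Classify one sample against its preceding window (assumed rule)."""
        median = np.median(window_values)
        stdev = np.std(window_values)
        if value < median - 3 * stdev:
            return "regression"
        if value > median + 3 * stdev:
            return "progression"
        return "normal"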
diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index d81f64fbe6..cde6d1acc4 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,12 +16,9 @@ Download all data.
"""
import re
-import gzip
-import logging
-from os import rename, remove
-from os.path import join, getsize
-from shutil import move
+from os import rename, mkdir
+from os.path import join
from zipfile import ZipFile, is_zipfile, BadZipfile
from httplib import responses
from requests import get, codes, RequestException, Timeout, TooManyRedirects, \
@@ -39,216 +36,195 @@ SEPARATOR = "__"
REGEX_RELEASE = re.compile(r'(\D*)(\d{4}|master)(\D*)')
-def download_data_files(spec):
- """Download all data specified in the specification file in the section
- type: input --> builds.
+def _download_file(url, file_name, log):
+ """Download a file with input data.
- :param spec: Specification.
+ :param url: URL to the file to download.
+ :param file_name: Name of file to download.
+ :param log: List of log messages.
+ :type url: str
+ :type file_name: str
+ :type log: list of tuples (severity, msg)
+ :returns: True if the download was successful, otherwise False.
+ :rtype: bool
+ """
+
+ success = False
+ try:
+ log.append(("INFO", " Connecting to '{0}' ...".format(url)))
+
+ response = get(url, stream=True)
+ code = response.status_code
+
+ log.append(("INFO", " {0}: {1}".format(code, responses[code])))
+
+ if code != codes["OK"]:
+ return False
+
+ log.append(("INFO", " Downloading the file '{0}' to '{1}' ...".
+ format(url, file_name)))
+
+ file_handle = open(file_name, "wb")
+ for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
+ if chunk:
+ file_handle.write(chunk)
+ file_handle.close()
+ success = True
+ except ConnectionError as err:
+ log.append(("ERROR", "Not possible to connect to '{0}'.".format(url)))
+ log.append(("DEBUG", str(err)))
+ except HTTPError as err:
+ log.append(("ERROR", "Invalid HTTP response from '{0}'.".format(url)))
+ log.append(("DEBUG", str(err)))
+ except TooManyRedirects as err:
+ log.append(("ERROR", "Request exceeded the configured number "
+ "of maximum re-directions."))
+ log.append(("DEBUG", str(err)))
+ except Timeout as err:
+ log.append(("ERROR", "Request timed out."))
+ log.append(("DEBUG", str(err)))
+ except RequestException as err:
+ log.append(("ERROR", "Unexpected HTTP request exception."))
+ log.append(("DEBUG", str(err)))
+ except (IOError, ValueError, KeyError) as err:
+ log.append(("ERROR", "Download failed."))
+ log.append(("DEBUG", str(err)))
+
+ log.append(("INFO", " Download finished."))
+ return success
+
+
+def _unzip_file(spec, build, pid, log):
+ """Unzip downloaded source file.
+
+ :param spec: Specification read from the specification file.
+ :param build: Information about the build.
+ :param log: List of log messages.
:type spec: Specification
- :raises: PresentationError if there is no url defined for the job.
+ :type build: dict
+ :type log: list of tuples (severity, msg)
+ :returns: True if the unzip was successful, otherwise False.
+ :rtype: bool
"""
- for job, builds in spec.builds.items():
- for build in builds:
- if job.startswith("csit-"):
- if spec.input["file-name"].endswith(".zip"):
- url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
- elif spec.input["file-name"].endswith(".gz"):
- url = spec.environment["urls"]["URL[NEXUS,LOG]"]
- else:
- logging.error("Not supported file format.")
- continue
- elif job.startswith("hc2vpp-"):
- url = spec.environment["urls"]["URL[JENKINS,HC]"]
- else:
- raise PresentationError("No url defined for the job '{}'.".
- format(job))
- file_name = spec.input["file-name"]
- full_name = spec.input["download-path"].\
- format(job=job, build=build["build"], filename=file_name)
- url = "{0}/{1}".format(url, full_name)
- new_name = join(
- spec.environment["paths"]["DIR[WORKING,DATA]"],
- "{job}{sep}{build}{sep}{name}".format(job=job,
- sep=SEPARATOR,
- build=build["build"],
- name=file_name))
- logging.info(
- "Downloading the file '{0}' to '{1}' ...".format(url, new_name))
-
- status = "failed"
- try:
- response = get(url, stream=True)
- code = response.status_code
-
- if code != codes["OK"]:
- logging.warning(
- "Jenkins: {0}: {1}.".format(code, responses[code]))
- logging.info("Trying to download from Nexus:")
- spec.set_input_state(job, build["build"], "not found")
- if code == codes["not_found"]:
- release = re.search(REGEX_RELEASE, job).group(2)
- nexus_file_name = "{job}{sep}{build}{sep}{name}".\
- format(job=job, sep=SEPARATOR, build=build["build"],
- name=file_name)
- try:
- release = "rls{0}".format(int(release))
- except ValueError:
- pass
- url = "{url}/{release}/{dir}/{file}".\
- format(url=spec.environment["urls"]["URL[NEXUS]"],
- release=release,
- dir=spec.environment["urls"]["DIR[NEXUS]"],
- file=nexus_file_name)
- logging.info("Downloading the file '{0}' to '{1}' ...".
- format(url, new_name))
- response = get(url, stream=True)
- code = response.status_code
- if code != codes["OK"]:
- logging.error(
- "Nexus: {0}: {1}".format(code, responses[code]))
- spec.set_input_state(
- job, build["build"], "not found")
- continue
-
- file_handle = open(new_name, "wb")
- for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
- if chunk:
- file_handle.write(chunk)
- file_handle.close()
-
- if spec.input["file-name"].endswith(".zip"):
- expected_length = None
- try:
- expected_length = int(response.
- headers["Content-Length"])
- logging.debug(" Expected file size: {0}B".
- format(expected_length))
- except KeyError:
- logging.debug(" No information about expected size.")
-
- real_length = getsize(new_name)
- logging.debug(" Downloaded size: {0}B".format(real_length))
-
- if expected_length:
- if real_length == expected_length:
- status = "downloaded"
- logging.info("{0}: {1}".format(code,
- responses[code]))
- else:
- logging.error("The file size differs from the "
- "expected size.")
- else:
- status = "downloaded"
- logging.info("{0}: {1}".format(code, responses[code]))
-
- elif spec.input["file-name"].endswith(".gz"):
- if "docs.fd.io" in url:
- execute_command("gzip --decompress --keep --force {0}".
- format(new_name))
- else:
- rename(new_name, new_name[:-3])
- execute_command("gzip --keep {0}".format(new_name[:-3]))
- new_name = new_name[:-3]
- status = "downloaded"
- logging.info("{0}: {1}".format(code, responses[code]))
-
- except ConnectionError as err:
- logging.error("Not possible to connect to '{0}'.".format(url))
- logging.debug(err)
- except HTTPError as err:
- logging.error("Invalid HTTP response from '{0}'.".format(url))
- logging.debug(err)
- except TooManyRedirects as err:
- logging.error("Request exceeded the configured number "
- "of maximum re-directions.")
- logging.debug(err)
- except Timeout as err:
- logging.error("Request timed out.")
- logging.debug(err)
- except RequestException as err:
- logging.error("Unexpected HTTP request exception.")
- logging.debug(err)
- except (IOError, ValueError, KeyError) as err:
- logging.error("Download failed.")
- logging.debug("Reason: {0}".format(err))
-
- spec.set_input_state(job, build["build"], status)
- spec.set_input_file_name(job, build["build"], new_name)
-
- if status == "failed":
- logging.info("Removing the file '{0}'".format(new_name))
- try:
- remove(new_name)
- except OSError as err:
- logging.warning(str(err))
- spec.set_input_file_name(job, build["build"], None)
-
- unzip_files(spec)
-
-
-def unzip_files(spec):
- """Unzip downloaded zip files
-
- :param spec: Specification.
+ data_file = spec.input["extract"]
+ file_name = build["file-name"]
+ directory = spec.environment["paths"]["DIR[WORKING,DATA]"]
+ tmp_dir = join(directory, str(pid))
+ try:
+ mkdir(tmp_dir)
+ except OSError:
+ pass
+ new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2],
+ SEPARATOR,
+ data_file.split("/")[-1])
+
+ log.append(("INFO", " Unzipping: '{0}' from '{1}'.".
+ format(data_file, file_name)))
+ try:
+ with ZipFile(file_name, 'r') as zip_file:
+ zip_file.extract(data_file, tmp_dir)
+ log.append(("INFO", " Renaming the file '{0}' to '{1}'".
+ format(join(tmp_dir, data_file), new_name)))
+ rename(join(tmp_dir, data_file), new_name)
+ build["file-name"] = new_name
+ return True
+ except (BadZipfile, RuntimeError) as err:
+ log.append(("ERROR", "Failed to unzip the file '{0}': {1}.".
+ format(file_name, str(err))))
+ return False
+ except OSError as err:
+ log.append(("ERROR", "Failed to rename the file '{0}': {1}.".
+ format(data_file, str(err))))
+ return False
+
+
+def download_and_unzip_data_file(spec, job, build, pid, log):
+ """Download and unzip a source file.
+
+ :param spec: Specification read from the specification file.
+ :param job: Name of the Jenkins job.
+ :param build: Information about the build.
+ :param pid: PID of the process executing this method.
+ :param log: List of log messages.
:type spec: Specification
- :raises: PresentationError if the zip file does not exist or it is not a
- zip file.
+ :type job: str
+ :type build: dict
+ :type pid: int
+ :type log: list of tuples (severity, msg)
+ :returns: True if the download was successful, otherwise False.
+ :rtype: bool
"""
- if spec.is_debug:
- data_file = spec.debug["extract"]
+ if job.startswith("csit-"):
+ if spec.input["file-name"].endswith(".zip"):
+ url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
+ elif spec.input["file-name"].endswith(".gz"):
+ url = spec.environment["urls"]["URL[NEXUS,LOG]"]
+ else:
+ log.append(("ERROR", "Not supported file format."))
+ return False
+ elif job.startswith("hc2vpp-"):
+ url = spec.environment["urls"]["URL[JENKINS,HC]"]
else:
- data_file = spec.input["extract"]
-
- for job, builds in spec.builds.items():
- for build in builds:
- if build["status"] == "failed" or build["status"] == "not found":
- continue
+ raise PresentationError("No url defined for the job '{}'.".
+ format(job))
+ file_name = spec.input["file-name"]
+ full_name = spec.input["download-path"]. \
+ format(job=job, build=build["build"], filename=file_name)
+ url = "{0}/{1}".format(url, full_name)
+ new_name = join(spec.environment["paths"]["DIR[WORKING,DATA]"],
+ "{job}{sep}{build}{sep}{name}".
+ format(job=job, sep=SEPARATOR, build=build["build"],
+ name=file_name))
+ # Download the file from the defined source (Jenkins, logs.fd.io):
+ success = _download_file(url, new_name, log)
+
+ if success and new_name.endswith(".zip"):
+ if not is_zipfile(new_name):
+ success = False
+
+ # If not successful, download from docs.fd.io:
+ if not success:
+ log.append(("INFO", " Trying to download from https://docs.fd.io:"))
+ release = re.search(REGEX_RELEASE, job).group(2)
+ for rls in (release, "master"):
+ nexus_file_name = "{job}{sep}{build}{sep}{name}". \
+ format(job=job, sep=SEPARATOR, build=build["build"],
+ name=file_name)
try:
- status = "failed"
- directory = spec.environment["paths"]["DIR[WORKING,DATA]"]
- file_name = join(build["file-name"])
-
- if build["status"] == "downloaded":
- logging.info("Unziping: '{0}' from '{1}'.".
- format(data_file, file_name))
- new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2],
- SEPARATOR,
- data_file.split("/")[-1])
- try:
- if is_zipfile(file_name):
- with ZipFile(file_name, 'r') as zip_file:
- zip_file.extract(data_file, directory)
- logging.info("Moving {0} to {1} ...".
- format(join(directory, data_file),
- directory))
- move(join(directory, data_file), directory)
- logging.info("Renaming the file '{0}' to '{1}'".
- format(join(directory,
- data_file.split("/")[-1]),
- new_name))
- rename(join(directory, data_file.split("/")[-1]),
- new_name)
- spec.set_input_file_name(job, build["build"],
- new_name)
- status = "unzipped"
- spec.set_input_state(job, build["build"], status)
- except (BadZipfile, RuntimeError) as err:
- logging.error("Failed to unzip the file '{0}': {1}.".
- format(file_name, str(err)))
- except OSError as err:
- logging.error("Failed to rename the file '{0}': {1}.".
- format(data_file, str(err)))
- finally:
- if status == "failed":
- spec.set_input_file_name(job, build["build"], None)
- else:
- raise PresentationError("The file '{0}' does not exist or "
- "it is not a zip file".
- format(file_name))
-
- spec.set_input_state(job, build["build"], status)
-
- except KeyError:
+ rls = "rls{0}".format(int(rls))
+ except ValueError:
pass
+ url = "{url}/{release}/{dir}/{file}". \
+ format(url=spec.environment["urls"]["URL[NEXUS]"],
+ release=rls,
+ dir=spec.environment["urls"]["DIR[NEXUS]"],
+ file=nexus_file_name)
+ success = _download_file(url, new_name, log)
+ if success:
+ break
+
+ if success:
+ build["file-name"] = new_name
+ else:
+ return False
+
+ if spec.input["file-name"].endswith(".gz"):
+ if "docs.fd.io" in url:
+ execute_command("gzip --decompress --keep --force {0}".
+ format(new_name))
+ else:
+ rename(new_name, new_name[:-3])
+ execute_command("gzip --keep {0}".format(new_name[:-3]))
+ build["file-name"] = new_name[:-3]
+
+ if new_name.endswith(".zip"):
+ if is_zipfile(new_name):
+ return _unzip_file(spec, build, pid, log)
+ else:
+ log.append(("ERROR",
+ "Zip file '{0}' is corrupted.".format(new_name)))
+ return False
+ else:
+ return True
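download_and_unzip_data_file is now the single entry point the parser workers call; on success it rewrites build["file-name"] to point at the extracted robot output file and reports progress only through the caller-supplied log list. A minimal usage sketch, assuming spec is an already parsed Specification and using hypothetical job and build values:

    log = list()
    build = {"build": 123}                              # hypothetical build record
    success = download_and_unzip_data_file(
        spec, "csit-vpp-perf-mrr-daily-master", build, pid=1, log=log)
    if success:
        print(build["file-name"])                       # extracted output.xml path
    for severity, message in log:                       # replay buffered messages
        print(severity, message)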
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 7adc9c085b..beec34c106 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -18,15 +18,20 @@
- provide access to the data.
"""
+import multiprocessing
+import os
import re
import pandas as pd
import logging
-import xml.etree.ElementTree as ET
from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
+from os import remove
+
+from input_data_files import download_and_unzip_data_file
+from utils import Worker
class ExecutionChecker(ResultVisitor):
@@ -171,14 +176,14 @@ class ExecutionChecker(ResultVisitor):
REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
r'[\D\d]*')
- REGEX_VERSION = re.compile(r"(stdout: 'vat# vat# Version:)(\s*)(.*)")
+ REGEX_VERSION = re.compile(r"(return STDOUT Version:\s*)(.*)")
REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
r'tx\s(\d*),\srx\s(\d*)')
- def __init__(self, **metadata):
+ def __init__(self, metadata):
"""Initialisation.
:param metadata: Key-value pairs to be included in "metadata" part of
@@ -244,14 +249,13 @@ class ExecutionChecker(ResultVisitor):
:returns: Nothing.
"""
- if msg.message.count("stdout: 'vat# vat# Version:"):
+ if msg.message.count("return STDOUT Version:"):
self._version = str(re.search(self.REGEX_VERSION, msg.message).
- group(3))
+ group(2))
self._data["metadata"]["version"] = self._version
+ self._data["metadata"]["generated"] = msg.timestamp
self._msg_type = None
- logging.debug(" VPP version: {0}".format(self._version))
-
def _get_vat_history(self, msg):
"""Called when extraction of VAT command history is required.
@@ -585,7 +589,7 @@ class ExecutionChecker(ResultVisitor):
:type setup_kw: Keyword
:returns: Nothing.
"""
- if setup_kw.name.count("Vpp Show Version Verbose") \
+ if setup_kw.name.count("Show Vpp Version On All Duts") \
and not self._version:
self._msg_type = "setup-version"
setup_kw.messages.visit(self)
@@ -696,7 +700,7 @@ class InputData(object):
self._cfg = spec
# Data store:
- self._input_data = None
+ self._input_data = pd.Series()
@property
def data(self):
@@ -747,76 +751,186 @@ class InputData(object):
return self.data[job][build]["tests"]
@staticmethod
- def _parse_tests(job, build):
+ def _parse_tests(job, build, log):
"""Process data from robot output.xml file and return JSON structured
data.
:param job: The name of job which build output data will be processed.
:param build: The build which output data will be processed.
+ :param log: List of log messages.
:type job: str
:type build: dict
+ :type log: list of tuples (severity, msg)
:returns: JSON data structure.
:rtype: dict
"""
- tree = ET.parse(build["file-name"])
- root = tree.getroot()
- generated = root.attrib["generated"]
+ metadata = {
+ "job": job,
+ "build": build
+ }
with open(build["file-name"], 'r') as data_file:
try:
result = ExecutionResult(data_file)
except errors.DataError as err:
- logging.error("Error occurred while parsing output.xml: {0}".
- format(err))
+ log.append(("ERROR", "Error occurred while parsing output.xml: "
+ "{0}".format(err)))
return None
- checker = ExecutionChecker(job=job, build=build, generated=generated)
+ checker = ExecutionChecker(metadata)
result.visit(checker)
return checker.data
- def read_data(self):
- """Parse input data from input files and store in pandas' Series.
+ def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
+ """Download and parse the input data file.
+
+ :param pid: PID of the process executing this method.
+ :param data_queue: Shared memory between processes. Queue which keeps
+ the result data. This data is then read by the main process and used
+ in further processing.
+ :param job: Name of the Jenkins job which generated the processed input
+ file.
+ :param build: Information about the Jenkins build which generated the
+ processed input file.
+ :param repeat: Repeat the download the specified number of times if not
+ successful.
+ :type pid: int
+ :type data_queue: multiprocessing.Manager().Queue()
+ :type job: str
+ :type build: dict
+ :type repeat: int
+ """
+
+ logs = list()
+
+ logging.info(" Processing the job/build: {0}: {1}".
+ format(job, build["build"]))
+
+ logs.append(("INFO", " Processing the job/build: {0}: {1}".
+ format(job, build["build"])))
+
+ state = "failed"
+ success = False
+ data = None
+ do_repeat = repeat
+ while do_repeat:
+ success = download_and_unzip_data_file(self._cfg, job, build, pid,
+ logs)
+ if success:
+ break
+ do_repeat -= 1
+ if not success:
+ logs.append(("ERROR", "It is not possible to download the input "
+ "data file from the job '{job}', build "
+ "'{build}', or it is damaged. Skipped.".
+ format(job=job, build=build["build"])))
+ if success:
+ logs.append(("INFO", " Processing data from the build '{0}' ...".
+ format(build["build"])))
+ data = InputData._parse_tests(job, build, logs)
+ if data is None:
+ logs.append(("ERROR", "Input data file from the job '{job}', "
+ "build '{build}' is damaged. Skipped.".
+ format(job=job, build=build["build"])))
+ else:
+ state = "processed"
+
+ try:
+ remove(build["file-name"])
+ except OSError as err:
+ logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
+ format(build["file-name"], err)))
+ logs.append(("INFO", " Done."))
+
+ result = {
+ "data": data,
+ "state": state,
+ "job": job,
+ "build": build,
+ "logs": logs
+ }
+ data_queue.put(result)
+
+ def download_and_parse_data(self, repeat=1):
+ """Download the input data files, parse input data from input files and
+ store in pandas' Series.
+
+ :param repeat: Repeat the download the specified number of times if not
+ successful.
+ :type repeat: int
"""
- logging.info("Parsing input files ...")
+ logging.info("Downloading and parsing input files ...")
+
+ work_queue = multiprocessing.JoinableQueue()
+ manager = multiprocessing.Manager()
+ data_queue = manager.Queue()
+ cpus = multiprocessing.cpu_count()
+
+ workers = list()
+ for cpu in range(cpus):
+ worker = Worker(work_queue,
+ data_queue,
+ self._download_and_parse_build)
+ worker.daemon = True
+ worker.start()
+ workers.append(worker)
+ os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+ format(cpu, worker.pid))
- job_data = dict()
for job, builds in self._cfg.builds.items():
- logging.info(" Extracting data from the job '{0}' ...'".
- format(job))
- builds_data = dict()
for build in builds:
- if build["status"] == "failed" \
- or build["status"] == "not found":
- continue
- logging.info(" Extracting data from the build '{0}'".
- format(build["build"]))
- logging.info(" Processing the file '{0}'".
- format(build["file-name"]))
- data = InputData._parse_tests(job, build)
- if data is None:
- logging.error("Input data file from the job '{job}', build "
- "'{build}' is damaged. Skipped.".
- format(job=job, build=build["build"]))
- continue
+ work_queue.put((job, build, repeat))
+
+ work_queue.join()
+
+ logging.info("Done.")
+
+ while not data_queue.empty():
+ result = data_queue.get()
+
+ job = result["job"]
+ build_nr = result["build"]["build"]
+ if result["data"]:
+ data = result["data"]
build_data = pd.Series({
"metadata": pd.Series(data["metadata"].values(),
index=data["metadata"].keys()),
"suites": pd.Series(data["suites"].values(),
index=data["suites"].keys()),
"tests": pd.Series(data["tests"].values(),
- index=data["tests"].keys()),
- })
- builds_data[str(build["build"])] = build_data
- logging.info(" Done.")
+ index=data["tests"].keys())})
- job_data[job] = pd.Series(builds_data.values(),
- index=builds_data.keys())
- logging.info(" Done.")
+ if self._input_data.get(job, None) is None:
+ self._input_data[job] = pd.Series()
+ self._input_data[job][str(build_nr)] = build_data
+
+ self._cfg.set_input_file_name(job, build_nr,
+ result["build"]["file-name"])
+
+ self._cfg.set_input_state(job, build_nr, result["state"])
+
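+ # Replay the log messages collected in the worker process using the
+ # main process logger.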
+ for item in result["logs"]:
+ if item[0] == "INFO":
+ logging.info(item[1])
+ elif item[0] == "ERROR":
+ logging.error(item[1])
+ elif item[0] == "DEBUG":
+ logging.debug(item[1])
+ elif item[0] == "CRITICAL":
+ logging.critical(item[1])
+ elif item[0] == "WARNING":
+ logging.warning(item[1])
+
+ del data_queue
+
+ # Terminate all workers
+ for worker in workers:
+ worker.terminate()
+ worker.join()
- self._input_data = pd.Series(job_data.values(), index=job_data.keys())
logging.info("Done.")
@staticmethod
@@ -893,9 +1007,6 @@ class InputData(object):
:rtype pandas.Series
"""
- logging.info(" Creating the data set for the {0} '{1}'.".
- format(element.get("type", ""), element.get("title", "")))
-
try:
if element["filter"] in ("all", "template"):
cond = "True"
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 98642c898c..013c921124 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -21,7 +21,6 @@ import logging
from errors import PresentationError
from environment import Environment, clean_environment
from specification_parser import Specification
-from input_data_files import download_data_files, unzip_files
from input_data_parser import InputData
from generator_tables import generate_tables
from generator_plots import generate_plots
@@ -30,8 +29,6 @@ from static_content import prepare_static_content
from generator_report import generate_report
from generator_CPTA import generate_cpta
-from pprint import pprint
-
def parse_args():
"""Parse arguments from cmd line.
@@ -86,21 +83,20 @@ def main():
logging.critical("Finished with error.")
return 1
- ret_code = 0
+ if spec.output["output"] not in ("report", "CPTA"):
+ logging.critical("The output '{0}' is not supported.".
+ format(spec.output["output"]))
+ return 1
+
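+ # Assume failure; ret_code is set to 0 only after the report / CPTA
+ # generation finishes successfully.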
+ ret_code = 1
try:
env = Environment(spec.environment, args.force)
env.set_environment()
- if spec.is_debug:
- if spec.debug["input-format"] == "zip":
- unzip_files(spec)
- else:
- download_data_files(spec)
-
prepare_static_content(spec)
data = InputData(spec)
- data.read_data()
+ data.download_and_parse_data(repeat=2)
generate_tables(spec, data)
generate_plots(spec, data)
@@ -112,21 +108,16 @@ def main():
elif spec.output["output"] == "CPTA":
sys.stdout.write(generate_cpta(spec, data))
logging.info("Successfully finished.")
- else:
- logging.critical("The output '{0}' is not supported.".
- format(spec.output["output"]))
- ret_code = 1
+ ret_code = 0
except (KeyError, ValueError, PresentationError) as err:
logging.info("Finished with an error.")
logging.critical(str(err))
- ret_code = 1
except Exception as err:
logging.info("Finished with an unexpected error.")
logging.critical(str(err))
- ret_code = 1
finally:
- if spec is not None and not spec.is_debug:
+ if spec is not None:
clean_environment(spec.environment)
return ret_code
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index da4443dc30..f3dba402da 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -1,14 +1,5 @@
-
type: "environment"
- configuration:
- # Debug mode:
- # - Skip:
- # - Download of input data files
- # - Do:
- # - Read data from given zip / xml files
- # - Set the configuration as it is done in normal mode
- # If the section "type: debug" is missing, CFG[DEBUG] is set to 0.
- CFG[DEBUG]: 0
paths:
# Top level directories:
@@ -38,9 +29,11 @@
DIR[DTR]: "{DIR[WORKING,SRC]}/detailed_test_results"
DIR[DTR,PERF,DPDK]: "{DIR[DTR]}/dpdk_performance_results"
DIR[DTR,PERF,VPP]: "{DIR[DTR]}/vpp_performance_results"
+ DIR[DTR,MRR,VPP]: "{DIR[DTR]}/vpp_mrr_results"
DIR[DTR,PERF,COT]: "{DIR[DTR]}/cot_performance_results"
DIR[DTR,PERF,HC]: "{DIR[DTR]}/honeycomb_performance_results"
DIR[DTR,FUNC,VPP]: "{DIR[DTR]}/vpp_functional_results"
+ DIR[DTR,FUNC,VPP,CENTOS]: "{DIR[DTR]}/vpp_functional_results_centos"
DIR[DTR,FUNC,HC]: "{DIR[DTR]}/honeycomb_functional_results"
DIR[DTR,FUNC,NSHSFC]: "{DIR[DTR]}/nshsfc_functional_results"
DIR[DTR,PERF,VPP,IMPRV]: "{DIR[WORKING,SRC]}/vpp_performance_tests/performance_improvements"
@@ -48,7 +41,9 @@
# Detailed test configurations
DIR[DTC]: "{DIR[WORKING,SRC]}/test_configuration"
DIR[DTC,PERF,VPP]: "{DIR[DTC]}/vpp_performance_configuration"
+ DIR[DTC,MRR,VPP]: "{DIR[DTC]}/vpp_mrr_configuration"
DIR[DTC,FUNC,VPP]: "{DIR[DTC]}/vpp_functional_configuration"
+ DIR[DTC,FUNC,VPP,CENTOS]: "{DIR[DTC]}/vpp_functional_configuration_centos"
# Detailed tests operational data
DIR[DTO]: "{DIR[WORKING,SRC]}/test_operational_data"
@@ -79,6 +74,7 @@
# List the directories which are deleted while cleaning the environment.
# All directories MUST be defined in "paths" section.
#- "DIR[BUILD,HTML]"
+ - "DIR[WORKING,DATA]"
build-dirs:
# List the directories where the results (build) is stored.
@@ -91,49 +87,19 @@
data-sets:
plot-vpp-http-server-performance:
csit-vpp-perf-1801-all:
- - 157
- - 158
- - 159
- - 160
- - 161
- - 164
- - 165
- - 166
- - 168
- - 169
- - 170
-# TODO: Add the data sources
-# vpp-meltdown-impact:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# csit-vpp-perf-1710-all:
-# - 11l
-# - 12
-# - 13
-# TODO: Add the data sources
-# vpp-spectre-impact:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
+ - 534 # wrk
+ vpp-performance-changes:
# csit-vpp-perf-1710-all:
# - 11
# - 12
# - 13
- vpp-performance-changes:
- csit-vpp-perf-1710-all:
- - 11
- - 12
- - 13
- - 14
- - 15
- - 16
- - 17
- - 18
- - 19
- - 20
+# - 14
+# - 15
+# - 16
+# - 17
+# - 18
+# - 19
+# - 20
csit-vpp-perf-1801-all:
- 124 # sel
- 127 # sel
@@ -147,133 +113,52 @@
- 163 # sel
- 167 # sel
- 172 # sel acl only
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
plot-throughput-speedup-analysis:
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- - 124 # sel
- - 127 # sel
- - 128 # sel
- - 141 # sel
- - 142 # sel
- - 143 # sel
- - 145 # sel
- - 146 # sel
- - 162 # sel
- - 163 # sel
- - 167 # sel
- - 172 # sel acl only
-# performance-improvements:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 21
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 20
-# csit-vpp-perf-1801-all:
-# - 124
-# - 127
-# - 128
-# csit-ligato-perf-1710-all:
-# - 5
-# - 7
-# - 8
-# - 9
-# - 10
-# - 11
-# - 12
-# - 13
-# - 16
-# - 17
-# csit-ligato-perf-1801-all:
-# - 16 # sel
-# - 17 # sel
-# - 18 # sel
-# - 19 # sel
-# - 20 # sel
-# - 21 # sel
-# - 22 # sel
-# - 23 # sel
-# - 24 # sel
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
+ plot-ligato-throughput-speedup-analysis:
+ csit-ligato-perf-1801-all:
+ - 27
vpp-perf-results:
csit-vpp-perf-1801-all:
- - 122
- - 126
- - 129
- - 140
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
vpp-func-results:
csit-vpp-functional-1801-ubuntu1604-virl:
- - "lastSuccessfulBuild"
+ - 454
+ vpp-func-results-centos:
+ csit-vpp-functional-1801-centos7-virl:
+ - 454
+ vpp-mrr-results:
+ csit-vpp-perf-check-1801:
+ - 18 # mrr - full
ligato-perf-results:
csit-ligato-perf-1801-all:
- - 25 # full
- dpdk-perf-results:
- csit-dpdk-perf-1801-all:
- - 12
+ - 27 # full
hc-func-results:
csit-hc2vpp-verify-func-1801-ubuntu1604:
- - "lastSuccessfulBuild"
+ - 14
nsh-func-results:
csit-nsh_sfc-verify-func-1801-ubuntu1604-virl:
- - 1
+ - 3
plot-vpp-throughput-latency:
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- - 124 # sel
- - 127 # sel
- - 128 # sel
- - 141 # sel
- - 142 # sel
- - 143 # sel
- - 145 # sel
- - 146 # sel
- - 162 # sel
- - 163 # sel
- - 167 # sel
- - 172 # sel acl only
- plot-dpdk-throughput-latency:
- csit-dpdk-perf-1801-all:
- - 1
- - 3
- - 4
- - 5
- - 6
- - 7
- - 8
- - 10
- - 12
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
plot-ligato-throughput-latency:
csit-ligato-perf-1801-all:
- - 16 # sel
- - 17 # sel
- - 18 # sel
- - 19 # sel
- - 20 # sel
- - 21 # sel
- - 22 # sel
- - 23 # sel
- - 24 # sel
+ - 27 # full
plot-layouts:
@@ -492,27 +377,6 @@
height: 1000
-
- type: "debug"
- general:
- input-format: "xml" # zip or xml
- extract: "robot-plugin/output.xml" # Only for zip
- builds:
- # The files must be in the directory DIR[WORKING,DATA]
- csit-vpp-perf-1801-all:
- -
- build: 1
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 2
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 3
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 4
- file: "{DIR[WORKING,DATA]}/output.xml"
-
--
type: "static"
src-path: "{DIR[RST]}"
dst-path: "{DIR[WORKING,SRC]}"
@@ -525,17 +389,6 @@
download-path: "{job}/{build}/robot/report/*zip*/{filename}"
extract: "robot-plugin/output.xml"
builds:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 21
csit-vpp-perf-1710-all:
- 11
- 12
@@ -548,10 +401,6 @@
- 19
- 20
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- 124 # sel
- 127 # sel
- 128 # sel
@@ -560,21 +409,17 @@
- 143 # sel
- 145 # sel
- 146 # sel
- - 157 # wrk
- - 158 # wrk
- - 159 # wrk
- - 160 # wrk
- - 161 # wrk
- 162 # sel
- 163 # sel
- - 164 # wrk
- - 165 # wrk
- - 166 # wrk
- 167 # sel
- - 168 # wrk
- - 169 # wrk
- - 170 # wrk
- 172 # sel acl only
+ - 535 # 18.01.2 full
+ - 539 # 18.01.2 full
+ - 533 # 18.01.2 full
+ - 540 # 18.01.2 full
+ - 534 # 18.01.2 wrk
+ csit-vpp-perf-check-1801:
+ - 18 # mrr full
csit-ligato-perf-1710-all:
- 5
- 7
@@ -586,44 +431,16 @@
- 13
- 16
- 17
- csit-dpdk-perf-1801-all:
- - 1
- - 4
- - 5
- - 7
- - 8
- - 10
- - 12
- - 16
- - 17
csit-ligato-perf-1801-all:
- - 16 # sel
- - 17 # sel
- - 18 # sel
- - 19 # sel
- - 20 # sel
- - 21 # sel
- - 22 # sel
- - 23 # sel
- - 24 # sel
- - 25 # full
- csit-dpdk-perf-1801-all:
- - 1
- - 3
- - 4
- - 5
- - 6
- - 7
- - 8
- - 9
- - 10
- - 12
+ - 27 # full
csit-vpp-functional-1801-ubuntu1604-virl:
- - lastSuccessfulBuild
+ - 454
+ csit-vpp-functional-1801-centos7-virl:
+ - 454
csit-nsh_sfc-verify-func-1801-ubuntu1604-virl:
- - 1
+ - 3
csit-hc2vpp-verify-func-1801-ubuntu1604:
- - lastSuccessfulBuild
+ - 14
-
type: "output"
@@ -638,74 +455,6 @@
### T A B L E S ###
################################################################################
-#-
-# type: "table"
-# title: "Performance Impact of Meltdown Patches"
-# algorithm: "table_performance_comparison"
-# output-file-ext: ".csv"
-## TODO: specify dir
-# output-file: "{DIR[STATIC,VPP]}/meltdown-impact"
-# reference:
-# title: "No Meltdown"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# compare:
-# title: "Meltdown Patches Applied"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# data:
-# "vpp-meltdown-impact"
-# filter: "all"
-# parameters:
-# - "name"
-# - "parent"
-# - "throughput"
-# # Number of the best and the worst tests presented in the table. Use 0 (zero)
-# # to present all tests.
-# nr-of-tests-shown: 20
-#
-#-
-# type: "table"
-# title: "Performance Impact of Spectre Patches"
-# algorithm: "table_performance_comparison"
-# output-file-ext: ".csv"
-## TODO: specify dir
-# output-file: "{DIR[STATIC,VPP]}/meltdown-spectre-impact"
-# reference:
-# title: "No Spectre"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# compare:
-# title: "Spectre Patches Applied"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# data:
-# "vpp-spectre-impact"
-# filter: "all"
-# parameters:
-# - "name"
-# - "parent"
-# - "throughput"
-# # Number of the best and the worst tests presented in the table. Use 0 (zero)
-# # to present all tests.
-# nr-of-tests-shown: 20
-
-
type: "table"
title: "VPP Performance Changes"
@@ -713,22 +462,7 @@
output-file-ext: ".csv"
output-file: "{DIR[STATIC,VPP]}/performance-changes"
reference:
- title: "Release 1710"
- data:
- csit-vpp-perf-1710-all:
- - 11
- - 12
- - 13
- - 14
- - 15
- - 16
- - 17
- - 18
- - 19
- - 20
- compare:
- title: "Release 1801"
-# TODO: specify data sources
+ title: "Release 18.01"
data:
csit-vpp-perf-1801-all:
- 124 # sel
@@ -743,6 +477,25 @@
- 163 # sel
- 167 # sel
- 172 # sel acl only
+# csit-vpp-perf-1710-all:
+# - 11
+# - 12
+# - 13
+# - 14
+# - 15
+# - 16
+# - 17
+# - 18
+# - 19
+# - 20
+ compare:
+ title: "Release 18.01.2"
+ data:
+ csit-vpp-perf-1801-all:
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
data: "vpp-performance-changes"
filter: "all"
parameters:
@@ -754,48 +507,6 @@
nr-of-tests-shown: 20
outlier-const: 1.5
-#-
-# type: "table"
-# title: "Performance improvements"
-# algorithm: "table_performance_improvements"
-# template: "{DIR[DTR,PERF,VPP,IMPRV]}/tmpl_performance_improvements.csv"
-# output-file-ext: ".csv"
-# output-file: "{DIR[DTR,PERF,VPP,IMPRV]}/performance_improvements"
-# columns:
-# -
-# title: "Test Name"
-# data: "template 1"
-# -
-# title: "16.09 mean [Mpps]"
-# data: "template 2"
-# -
-# title: "17.01 mean [Mpps]"
-# data: "template 3"
-# -
-# title: "17.04 mean [Mpps]"
-# data: "template 4"
-# -
-# title: "17.07 mean [Mpps]"
-# data: "data csit-vpp-perf-1707-all mean"
-# -
-# title: "17.10 mean [Mpps]"
-# data: "data csit-vpp-perf-1710-all csit-ligato-perf-1710-all mean"
-# -
-# title: "18.01 mean [Mpps]"
-# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all mean"
-# -
-# title: "18.01 stdev [Mpps]"
-# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all stdev"
-# -
-# title: "17.10 to 18.01 change [%]"
-# data: "operation relative_change 5 6"
-# rows: "generated"
-# data:
-# "performance-improvements"
-# filter: "template"
-# parameters:
-# - "throughput"
-
-
type: "table"
title: "Detailed Test Results - VPP Performance Results"
@@ -868,6 +579,54 @@
-
type: "table"
+ title: "Detailed Test Results - VPP MRR Results"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "Documentation"
+ data: "data doc"
+ -
+ title: "Status"
+ data: "data msg"
+ rows: "generated"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "parent"
+ - "doc"
+ - "msg"
+
+-
+ type: "table"
+ title: "Test configuration - VPP MRR Test Configs"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_test_configuration"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case"
+ data: "data vat-history"
+ rows: "generated"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "parent"
+ - "name"
+ - "vat-history"
+
+-
+ type: "table"
title: "Detailed Test Results - VPP Functional Results"
algorithm: "table_details"
output-file-ext: ".csv"
@@ -894,6 +653,32 @@
-
type: "table"
+ title: "Detailed Test Results - VPP Functional Results - CentOS"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "Documentation"
+ data: "data doc"
+ -
+ title: "Status"
+ data: "data status"
+ rows: "generated"
+ data:
+ "vpp-func-results-centos"
+ filter: "all"
+ parameters:
+ - "name"
+ - "parent"
+ - "doc"
+ - "status"
+
+-
+ type: "table"
title: "Test configuration - VPP Functional Test Configs"
algorithm: "table_details"
output-file-ext: ".csv"
@@ -916,36 +701,32 @@
-
type: "table"
- title: "Detailed Test Results - Container Orchestrated Topologies Performance Results"
+ title: "Test configuration - VPP Functional Test Configs - CentOS"
algorithm: "table_details"
output-file-ext: ".csv"
- output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
+ output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos"
columns:
-
title: "Name"
data: "data name"
-
- title: "Documentation"
- data: "data doc"
- -
- title: "Status"
- data: "data msg"
+ title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case"
+ data: "data vat-history"
rows: "generated"
data:
- "ligato-perf-results"
+ "vpp-func-results-centos"
filter: "all"
parameters:
- - "name"
- "parent"
- - "doc"
- - "msg"
+ - "name"
+ - "vat-history"
-
type: "table"
- title: "Detailed Test Results - DPDK Performance Results"
+ title: "Detailed Test Results - Container Orchestrated Topologies Performance Results"
algorithm: "table_details"
output-file-ext: ".csv"
- output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results"
+ output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
columns:
-
title: "Name"
@@ -958,7 +739,7 @@
data: "data msg"
rows: "generated"
data:
- "dpdk-perf-results"
+ "ligato-perf-results"
filter: "all"
parameters:
- "name"
@@ -1081,6 +862,44 @@
-
type: "file"
+ title: "VPP MRR Results"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTR,MRR,VPP]}"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ - "parent"
+ data-start-level: 2 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
+ title: "VPP MRR Configuration"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_configuration"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTC,MRR,VPP]}"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ - "parent"
+ data-start-level: 2 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
title: "VPP Functional Results"
algorithm: "file_test_results"
output-file-ext: ".rst"
@@ -1099,6 +918,24 @@
-
type: "file"
+ title: "VPP Functional Results - CentOS"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTR,FUNC,VPP,CENTOS]}"
+ data:
+ "vpp-func-results-centos"
+ filter: "all"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ data-start-level: 3 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
title: "VPP Functional Configuration"
algorithm: "file_test_results"
output-file-ext: ".rst"
@@ -1117,39 +954,37 @@
-
type: "file"
- title: "Container Orchestrated Performance Results"
+ title: "VPP Functional Configuration - CentOS"
algorithm: "file_test_results"
output-file-ext: ".rst"
- output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
+ output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
- dir-tables: "{DIR[DTR,PERF,COT]}"
+ dir-tables: "{DIR[DTC,FUNC,VPP,CENTOS]}"
data:
- "ligato-perf-results"
+ "vpp-func-results-centos"
filter: "all"
parameters:
- "name"
- "doc"
- "level"
- data-start-level: 2 # 0, 1, 2, ...
+ data-start-level: 3 # 0, 1, 2, ...
chapters-start-level: 2 # 0, 1, 2, ...
-
type: "file"
- title: "DPDK Performance Results"
+ title: "Container Orchestrated Performance Results"
algorithm: "file_test_results"
output-file-ext: ".rst"
- output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results"
+ output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
- dir-tables: "{DIR[DTR,PERF,DPDK]}"
+ dir-tables: "{DIR[DTR,PERF,COT]}"
data:
- "dpdk-perf-results"
+ "ligato-perf-results"
filter: "all"
parameters:
- "name"
- "doc"
- "level"
- chapters:
- - "suites"
data-start-level: 2 # 0, 1, 2, ...
chapters-start-level: 2 # 0, 1, 2, ...
@@ -1415,6 +1250,452 @@
layout:
"plot-throughput-speedup-analysis"
+# SRv6 - 10ge2p1x520 - NDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6-ip6(base|scale)*ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'NDRDISC' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6-ip6(base|scale)*ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# SRv6 - 10ge2p1x520 - PDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6-ip6(base|scale)*pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6-ip6(base|scale)*pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP4_overlay - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP4_overlay - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP6_overlay - NDR
+-
+ type: "plot"
+ title: "TSA: 78B-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP6_overlay - PDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# VM VHOST - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# VM VHOST - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# CRYPTO - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*ipsec.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*ipsec.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# CRYPTO - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*ipsec.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'PDRDISC' and not 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*ipsec.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container memif - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container memif - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container orchestrated - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container orchestrated - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
# Plot packets per second
# VPP L2 sel1
@@ -1683,7 +1964,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1704,7 +1985,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1725,7 +2006,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-pdrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1746,7 +2027,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-pdrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1759,6 +2040,91 @@
layout:
"plot-throughput"
+# VPP SRv6
+-
+ type: "plot"
+ title: "VPP Performance 78B-1t1c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-1t1c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-2t2c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-2t2c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-1t1c-ethip6*srv6*pdrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-pdrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-1t1c-ethip6*srv6*pdrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-2t2c-ethip6*srv6*pdrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-pdrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-2t2c-ethip6*srv6*pdrdisc"
+ layout:
+ "plot-throughput"
+
# VPP IP4_overlay
-
type: "plot"
@@ -1863,45 +2229,8 @@
whiskerwidth: 0
layout:
title: "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 6000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -1963,45 +2292,8 @@
whiskerwidth: 0
layout:
title: "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [4000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
# VPP VM VHOST
-
@@ -2022,45 +2314,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-.*vhost.*-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 3500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -2295,323 +2550,6 @@
layout:
"plot-throughput"
-# DPDK
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [20000000, 30000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [20000000, 30000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
# Plot latency
# VPP L2 sel1
@@ -2740,7 +2678,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc-lat50"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "latency"
- "parent"
@@ -2759,7 +2697,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc-lat50"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "latency"
- "parent"
@@ -2770,6 +2708,45 @@
layout:
"plot-latency"
+# VPP SRv6
+-
+ type: "plot"
+ title: "VPP Latency 78B-1t1c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_latency_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc-lat50"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "latency"
+ - "parent"
+ traces:
+ boxmean: False
+ layout:
+ title: "78B-1t1c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-latency"
+
+-
+ type: "plot"
+ title: "VPP Latency 78B-2t2c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_latency_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc-lat50"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "latency"
+ - "parent"
+ traces:
+ boxmean: False
+ layout:
+ title: "78B-2t2c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-latency"
+
# VPP IP4_overlay
-
type: "plot"
@@ -2965,83 +2942,6 @@
layout:
"plot-latency"
-# DPDK
--
- type: "plot"
- title: "DPDK Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- layout:
- "plot-latency"
-
# Ligato - Throughput
# Container memif
@@ -3063,45 +2963,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 4500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3121,45 +2984,8 @@
whiskerwidth: 0
layout:
title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 8000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3179,45 +3005,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 4500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3237,45 +3026,8 @@
whiskerwidth: 0
layout:
title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 8000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
# Container orchestrated
-
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 207507e3b6..ebd84530a3 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -49,7 +49,6 @@ class Specification(object):
self._specification = {"environment": dict(),
"configuration": dict(),
- "debug": dict(),
"static": dict(),
"input": dict(),
"output": dict(),
@@ -95,15 +94,6 @@ class Specification(object):
return self._specification["static"]
@property
- def debug(self):
- """Getter - debug
-
- :returns: Debug specification
- :rtype: dict
- """
- return self._specification["debug"]
-
- @property
def is_debug(self):
"""Getter - debug mode
@@ -425,43 +415,6 @@ class Specification(object):
self.configuration["data-sets"][set_name][job] = builds
logging.info("Done.")
- def _parse_debug(self):
- """Parse debug specification in the specification YAML file.
- """
-
- if int(self.environment["configuration"]["CFG[DEBUG]"]) != 1:
- return None
-
- logging.info("Parsing specification file: debug ...")
-
- idx = self._get_type_index("debug")
- if idx is None:
- self.environment["configuration"]["CFG[DEBUG]"] = 0
- return None
-
- try:
- for key, value in self._cfg_yaml[idx]["general"].items():
- self._specification["debug"][key] = value
-
- self._specification["input"]["builds"] = dict()
- for job, builds in self._cfg_yaml[idx]["builds"].items():
- if builds:
- self._specification["input"]["builds"][job] = list()
- for build in builds:
- self._specification["input"]["builds"][job].\
- append({"build": build["build"],
- "status": "downloaded",
- "file-name": self._replace_tags(
- build["file"],
- self.environment["paths"])})
- else:
- logging.warning("No build is defined for the job '{}'. "
- "Trying to continue without it.".
- format(job))
-
- except KeyError:
- raise PresentationError("No data to process.")
-
def _parse_input(self):
"""Parse input specification in the specification YAML file.
@@ -561,6 +514,13 @@ class Specification(object):
except KeyError:
pass
+ try:
+ element["input-file"] = self._replace_tags(
+ element["input-file"],
+ self._specification["environment"]["paths"])
+ except KeyError:
+ pass
+
# add data sets to the elements:
if isinstance(element.get("data", None), str):
data_set = element["data"]
@@ -657,9 +617,7 @@ class Specification(object):
self._parse_env()
self._parse_configuration()
- self._parse_debug()
- if not self.debug:
- self._parse_input()
+ self._parse_input()
self._parse_output()
self._parse_static()
self._parse_elements()
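Two things change in the parser: the debug branch is dropped (input is always parsed), and an element's "input-file" now also goes through tag replacement against the environment paths. A standalone sketch of that substitution, assuming tags of the form "{TAG}"; the function name replace_tags and the example path are illustrative, not the project's _replace_tags implementation:

def replace_tags(data, tags):
    """Substitute {TAG} placeholders in a string with values from tags."""
    for name, value in tags.items():
        data = data.replace("{{{0}}}".format(name), str(value))
    return data

paths = {"DIR[WORKING,DATA]": "/tmp/working/data"}   # assumed example value
element = {"input-file": "{DIR[WORKING,DATA]}/input.xml"}
element["input-file"] = replace_tags(element["input-file"], paths)
# element["input-file"] -> "/tmp/working/data/input.xml"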
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 8365bfad5c..0a9d985a88 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,6 +14,7 @@
"""General purpose utilities.
"""
+import multiprocessing
import subprocess
import numpy as np
import pandas as pd
@@ -21,7 +22,7 @@ import logging
from os import walk, makedirs, environ
from os.path import join, isdir
-from shutil import copy, Error
+from shutil import move, Error
from math import sqrt
from errors import PresentationError
@@ -68,58 +69,69 @@ def relative_change(nr1, nr2):
return float(((nr2 - nr1) / nr1) * 100)
-def remove_outliers(input_data, outlier_const):
- """
+def remove_outliers(input_list, outlier_const=1.5, window=14):
+ """Return list with outliers removed, using split_outliers.
- :param input_data: Data from which the outliers will be removed.
+ :param input_list: Data from which the outliers will be removed.
:param outlier_const: Outlier constant.
- :type input_data: list
+ :param window: How many preceding values to take into account.
+ :type input_list: list of floats
:type outlier_const: float
+ :type window: int
:returns: The input list without outliers.
- :rtype: list
+ :rtype: list of floats
"""
- data = np.array(input_data)
+ data = np.array(input_list)
upper_quartile = np.percentile(data, 75)
lower_quartile = np.percentile(data, 25)
iqr = (upper_quartile - lower_quartile) * outlier_const
quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
result_lst = list()
- for y in data.tolist():
+ for y in input_list:
if quartile_set[0] <= y <= quartile_set[1]:
result_lst.append(y)
return result_lst
-def find_outliers(input_data, outlier_const=1.5):
+def split_outliers(input_series, outlier_const=1.5, window=14):
"""Go through the input data and generate two pandas series:
- - input data without outliers
+ - input data with outliers replaced by NAN
- outliers.
The function uses IQR to detect outliers.
- :param input_data: Data to be examined for outliers.
+ :param input_series: Data to be examined for outliers.
:param outlier_const: Outlier constant.
- :type input_data: pandas.Series
+ :param window: How many preceding values to take into account.
+ :type input_series: pandas.Series
:type outlier_const: float
- :returns: Tuple: input data with outliers removed; Outliers.
- :rtype: tuple (trimmed_data, outliers)
+ :type window: int
+ :returns: Input data with NAN outliers and Outliers.
+ :rtype: (pandas.Series, pandas.Series)
"""
- upper_quartile = input_data.quantile(q=0.75)
- lower_quartile = input_data.quantile(q=0.25)
- iqr = (upper_quartile - lower_quartile) * outlier_const
- low = lower_quartile - iqr
- high = upper_quartile + iqr
+ list_data = list(input_series.items())
+ head_size = min(window, len(list_data))
+ head_list = list_data[:head_size]
trimmed_data = pd.Series()
outliers = pd.Series()
- for item in input_data.items():
- item_pd = pd.Series([item[1], ], index=[item[0], ])
- if low <= item[1] <= high:
+ for item_x, item_y in head_list:
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(item_pd)
+ for index, (item_x, item_y) in list(enumerate(list_data))[head_size:]:
+ y_rolling_list = [y for (x, y) in list_data[index - head_size:index]]
+ y_rolling_array = np.array(y_rolling_list)
+ q1 = np.percentile(y_rolling_array, 25)
+ q3 = np.percentile(y_rolling_array, 75)
+ iqr = (q3 - q1) * outlier_const
+ low = q1 - iqr
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ if low <= item_y:
trimmed_data = trimmed_data.append(item_pd)
else:
- trimmed_data = trimmed_data.append(pd.Series([np.nan, ],
- index=[item[0], ]))
outliers = outliers.append(item_pd)
+ nan_pd = pd.Series([np.nan, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(nan_pd)
return trimmed_data, outliers
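The rewritten split_outliers no longer applies one global IQR fence: the first `window` samples are kept as-is, and every later sample is compared against Q1 - outlier_const * IQR computed over the preceding `window` samples; only the lower fence is checked, so only sudden drops are treated as outliers and replaced by NaN. A simplified standalone sketch of that rolling lower-fence idea, using plain lists and made-up numbers rather than pandas:

import numpy as np

def rolling_low_fence(values, outlier_const=1.5, window=14):
    """Return indices of samples falling below the rolling IQR lower fence."""
    outliers = []
    for i in range(min(window, len(values)), len(values)):
        prev = np.array(values[max(0, i - window):i])
        q1, q3 = np.percentile(prev, 25), np.percentile(prev, 75)
        low = q1 - (q3 - q1) * outlier_const
        if values[i] < low:
            outliers.append(i)
    return outliers

samples = [9.9, 10.1, 10.0, 9.8, 10.2, 4.0, 10.1]   # 4.0 is the dip
print(rolling_low_fence(samples, window=5))          # -> [5]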
@@ -129,7 +141,7 @@ def get_files(path, extension=None, full_path=True):
:param path: Path to files.
:param extension: Extension of files to process. If it is the empty string,
- all files will be processed.
+ all files will be processed.
:param full_path: If True, the files with full path are generated.
:type path: str
:type extension: str
@@ -187,8 +199,10 @@ def execute_command(cmd):
stdout, stderr = proc.communicate()
- logging.info(stdout)
- logging.info(stderr)
+ if stdout:
+ logging.info(stdout)
+ if stderr:
+ logging.info(stderr)
if proc.returncode != 0:
logging.error(" Command execution failed.")
@@ -239,10 +253,7 @@ def archive_input_data(spec):
logging.info(" Archiving the input data files ...")
- if spec.is_debug:
- extension = spec.debug["input-format"]
- else:
- extension = spec.input["file-format"]
+ extension = spec.input["file-format"]
data_files = get_files(spec.environment["paths"]["DIR[WORKING,DATA]"],
extension=extension)
dst = spec.environment["paths"]["DIR[STATIC,ARCH]"]
@@ -253,11 +264,93 @@ def archive_input_data(spec):
makedirs(dst)
for data_file in data_files:
- logging.info(" Copying the file: {0} ...".format(data_file))
- copy(data_file, dst)
+ logging.info(" Moving the file: {0} ...".format(data_file))
+ move(data_file, dst)
except (Error, OSError) as err:
raise PresentationError("Not possible to archive the input data.",
str(err))
logging.info(" Done.")
+
+
+def classify_anomalies(data, window):
+ """Evaluates if the sample value is an outlier, regression, normal or
+ progression compared to the previous data within the window.
+ We use the intervals defined as:
+ - regress: less than trimmed moving median - 3 * stdev
+ - normal: between trimmed moving median - 3 * stdev and median + 3 * stdev
+ - progress: more than trimmed moving median + 3 * stdev
+ where stdev is trimmed moving standard deviation.
+
+ :param data: Full data set with the outliers replaced by nan.
+ :param window: Window size used to calculate moving average and moving
+ stdev.
+ :type data: pandas.Series
+ :type window: int
+ :returns: Evaluated results.
+ :rtype: list
+ """
+
+ if data.size < 3:
+ return None
+
+ win_size = data.size if data.size < window else window
+ tmm = data.rolling(window=win_size, min_periods=2).median()
+ tmstd = data.rolling(window=win_size, min_periods=2).std()
+
+ classification = ["normal", ]
+ first = True
+ for build, value in data.iteritems():
+ if first:
+ first = False
+ continue
+ if np.isnan(value) or np.isnan(tmm[build]) or np.isnan(tmstd[build]):
+ classification.append("outlier")
+ elif value < (tmm[build] - 3 * tmstd[build]):
+ classification.append("regression")
+ elif value > (tmm[build] + 3 * tmstd[build]):
+ classification.append("progression")
+ else:
+ classification.append("normal")
+ return classification
+
+
+class Worker(multiprocessing.Process):
+ """Worker class used to process tasks in separate parallel processes.
+ """
+
+ def __init__(self, work_queue, data_queue, func):
+ """Initialization.
+
+ :param work_queue: Queue with items to process.
+ :param data_queue: Shared memory between processes. Queue which keeps
+ the result data. This data is then read by the main process and used
+ in further processing.
+ :param func: Function which is executed by the worker.
+ :type work_queue: multiprocessing.JoinableQueue
+ :type data_queue: multiprocessing.Manager().Queue()
+ :type func: Callable object
+ """
+ super(Worker, self).__init__()
+ self._work_queue = work_queue
+ self._data_queue = data_queue
+ self._func = func
+
+ def run(self):
+ """Method representing the process's activity.
+ """
+
+ while True:
+ try:
+ self.process(self._work_queue.get())
+ finally:
+ self._work_queue.task_done()
+
+ def process(self, item_to_process):
+ """Method executed by the runner.
+
+ :param item_to_process: Data to be processed by the function.
+ :type item_to_process: tuple
+ """
+ self._func(self.pid, self._data_queue, *item_to_process)
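A hedged usage sketch tying the new helpers together: split_outliers masks suspect samples as NaN, classify_anomalies labels each build against the trimmed moving median +/- 3 stdev band, and Worker runs such jobs in separate processes. The helper _job, the single-worker setup and the sample values are illustrative only, not the project's generator code; the import assumes running from resources/tools/presentation/:

import multiprocessing
import pandas as pd

from utils import classify_anomalies, split_outliers, Worker

def _job(pid, data_queue, job_name, series):
    """Executed inside a Worker process; pushes the classification back."""
    trimmed, _ = split_outliers(series, outlier_const=1.5, window=14)
    data_queue.put((job_name, classify_anomalies(trimmed, window=14)))

if __name__ == "__main__":
    series = pd.Series([3.1e6, 3.0e6, 3.2e6, 1.1e6, 3.1e6])  # made-up pps
    work_queue = multiprocessing.JoinableQueue()
    data_queue = multiprocessing.Manager().Queue()
    worker = Worker(work_queue, data_queue, _job)
    worker.daemon = True
    worker.start()
    work_queue.put(("example-job", series))
    work_queue.join()
    print(data_queue.get())   # (job name, list of per-build classifications)

Worker.process unpacks each queued tuple into positional arguments of func, so the main process only needs to enqueue (job, data) tuples and drain the managed result queue.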