aboutsummaryrefslogtreecommitdiffstats
path: root/resources/tools/presentation/doc/graphs_improvements.rst
blob: 336faf9748f56e822721715ec38cc428b834d37a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108

/* Pygments syntax-highlighting palette used when the user agent reports a
   dark colour-scheme preference (Monokai-style colours: #f8f8f2 text,
   #66d9ef keywords, #e6db74 strings, #ae81ff numbers).
   Each rule targets one Pygments token class under a `.highlight` container;
   the trailing comment on each line names the token type. */
@media only all and (prefers-color-scheme: dark) {
.highlight .hll { background-color: #49483e }
.highlight .c { color: #75715e } /* Comment */
.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
.highlight .k { color: #66d9ef } /* Keyword */
.highlight .l { color: #ae81ff } /* Literal */
.highlight .n { color: #f8f8f2 } /* Name */
.highlight .o { color: #f92672 } /* Operator */
.highlight .p { color: #f8f8f2 } /* Punctuation */
.highlight .ch { color: #75715e } /* Comment.Hashbang */
.highlight .cm { color: #75715e } /* Comment.Multiline */
.highlight .cp { color: #75715e } /* Comment.Preproc */
.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
.highlight .c1 { color: #75715e } /* Comment.Single */
.highlight .cs { color: #75715e } /* Comment.Special */
.highlight .gd { color: #f92672 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gi { color: #a6e22e } /* Generic.Inserted */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #75715e } /* Generic.Subheading */
.highlight .kc { color: #66d9ef } /* Keyword.Constant */
.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
.highlight .kn { color: #f92672 } /* Keyword.Namespace */
.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
.highlight .kt { color: #66d9ef } /* Keyword.Type */
.highlight .ld { color: #e6db74 } /* Literal.Date */
.highlight .m { color: #ae81ff } /* Literal.Number */
.highlight .s { color: #e6db74 } /* Literal.String */
.highlight .na { color: #a6e22e } /* Name.Attribute */
.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
.highlight .nc { color: #a6e22e } /* Name.Class */
.highlight .no { color: #66d9ef } /* Name.Constant */
.highlight .nd { color: #a6e22e } /* Name.Decorator */
.highlight .ni { color: #f8f8f2 } /* Name.Entity */
.highlight .ne { color: #a6e22e } /* Name.Exception */
.highlight .nf { color: #a6e22e } /* Name.Function */
.highlight .nl { color: #f8f8f2 } /* Name.Label */
.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
.highlight .nx { color: #a6e22e } /* Name.Other */
.highlight .py { color: #f8f8f2 } /* Name.Property */
.highlight .nt { color: #f92672 } /* Name.Tag */
.highlight .nv { color: #f8f8f2 } /* Name.Variable */
.highlight .ow { color: #f92672 } /* Operator.Word */
.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
.highlight .sc { color: #e6db74 } /* Literal.String.Char */
.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
.highlight .se { color: #ae81ff } /* Literal.String.Escape */
.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
.highlight .sx { color: #e6db74 } /* Literal.String.Other */
.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
}
/* Pygments syntax-highlighting palette used when the user agent reports a
   light colour-scheme preference (default/light Pygments colours on a
   white background; strings get a pale #fff0f0 background).
   NOTE(review): this query lacks the `only all and` prefix the dark block
   uses — harmless, but the two media queries are stylistically
   inconsistent; confirm which form is intended. */
@media (prefers-color-scheme: light) {
.highlight .hll { background-color: #ffffcc }
.highlight .c { color: #888888 } /* Comment */
.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */
.highlight .k { color: #008800; font-weight: bold } /* Keyword */
.highlight .ch { color: #888888 } /* Comment.Hashbang */
.highlight .cm { color: #888888 } /* Comment.Multiline */
.highlight .cp { color: #cc0000; font-weight: bold } /* Comment.Preproc */
.highlight .cpf { color: #888888 } /* Comment.PreprocFile */
.highlight .c1 { color: #888888 } /* Comment.Single */
.highlight .cs { color: #cc0000; font-weight: bold; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gr { color: #aa0000 } /* Generic.Error */
.highlight .gh { color: #333333 } /* Generic.Heading */
.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
.highlight .go { color: #888888 } /* Generic.Output */
.highlight .gp { color: #555555 } /* Generic.Prompt */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #666666 } /* Generic.Subheading */
.highlight .gt { color: #aa0000 } /* Generic.Traceback */
.highlight .kc { color: #008800; font-weight: bold } /* Keyword.Constant */
.highlight .kd { color: #008800; font-weight: bold } /* Keyword.Declaration */
.highlight .kn { color: #008800; font-weight: bold } /* Keyword.Namespace */
.highlight .kp { color: #008800 } /* Keyword.Pseudo */
.highlight .kr { color: #008800; font-weight: bold } /* Keyword.Reserved */
.highlight .kt { color: #888888; font-weight: bold } /* Keyword.Type */
.highlight .m { color: #0000DD; font-weight: bold } /* Literal.Number */
.highlight .s { color: #dd2200; background-color: #fff0f0 } /* Literal.String */
.highlight .na { color: #336699 } /* Name.Attribute */
.highlight .nb { color: #003388 } /* Name.Builtin */
.highlight .nc { color: #bb0066; font-weight: bold } /* Name.Class */
.highlight .no { color: #003366; font-weight: bold } /* Name.Constant */
.highlight .nd { color: #555555 } /* Name.Decorator */
.highlight .ne { color: #bb0066; font-weight: bold } /* Name.Exception */
.highlight .nf { color: #0066bb; font-weight: bold } /* Name.Function */
.highlight .nl { color: #336699; font-style: italic } /* Name.Label */
.highlight .nn { color: #bb0066; font-weight: bold } /* Name.Namespace */
.highlight .py { color: #336699; font-weight: bold } /* Name.Property */
.highlight .nt { color: #bb0066; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #336699 } /* Name.Variable */
.highlight .ow { color: #008800 } /* Operator.Word */
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
.highlight .mb { color: #0000DD; font-weight: bold } /* Literal.Number.Bin */
.highlight .mf { color: #0000DD; font-weight: bold } /* Literal.Number.Float */
.highlight .mh { color: #0000DD; font-weight: bold } /* Literal.Number.Hex */
.highlight .mi { color: #0000DD; font-weight: bold } /* Literal.Number.Integer */
.highlight .mo { color: #0000DD; font-weight: bold } /* Literal.Number.Oct */
.highlight .sa { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Affix */
.highlight .sb { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Backtick */
.highlight .sc { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Char */
.highlight .dl { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Delimiter */
.highlight .sd { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Doc */
.highlight .s2 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Double */
.highlight .se { color: #0044dd; background-color: #fff0f0 } /* Literal.String.Escape */
.highlight .sh { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Heredoc */
.highlight .si { color: #3333bb; background-color: #fff0f0 } /* Literal.String.Interpol */
.highlight .sx { color: #22bb22; background-color: #f0fff0 } /* Literal.String.Other */
.highlight .sr { color: #008800; background-color: #fff0ff } /* Literal.String.Regex */
.highlight .s1 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Single */
.highlight .ss { color: #aa6600; background-color: #fff0f0 } /* Literal.String.Symbol */
.highlight .bp { color: #003388 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #0066bb; font-weight: bold } /* Name.Function.Magic */
.highlight .vc { color: #336699 } /* Name.Variable.Class */
.highlight .vg { color: #dd7700 } /* Name.Variable.Global */
.highlight .vi { color: #3333bb } /* Name.Variable.Instance */
.highlight .vm { color: #336699 } /* Name.Variable.Magic */
.highlight .il { color: #0000DD; font-weight: bold } /* Literal.Number.Integer.Long */
}
.PHONY: verify-python-path

# Directory where artifacts of failed unit tests are collected.
VPP_TEST_FAILED_DIR=/tmp/vpp-failed-unittests/

# Guard target: abort unless VPP_PYTHON_PREFIX is set in the environment.
# NOTE(review): presumably exported by the driving (top-level) Makefile as
# the root under which the test virtualenv lives — confirm against caller.
verify-python-path:
ifndef VPP_PYTHON_PREFIX
	$(error VPP_PYTHON_PREFIX is not set)
endif

.PHONY: verify-no-running-vpp

# VPP_ZOMBIE_NOCHECK skips the stale-process check entirely; otherwise
# collect the PIDs of any running vpp_main processes (comma-separated,
# exact name match).
ifdef VPP_ZOMBIE_NOCHECK
VPP_PIDS=
else
VPP_PIDS=$(shell pgrep -d, -x vpp_main)
endif

# Interactive debugging modes (gdb, gdbserver, core analysis) require the
# test run to stay in the foreground.
ifeq ($(DEBUG),gdb)
FORCE_FOREGROUND=1
else ifeq ($(DEBUG),gdbserver)
FORCE_FOREGROUND=1
else ifeq ($(DEBUG),core)
FORCE_FOREGROUND=1
else
FORCE_FOREGROUND=0
endif

# Optional cProfile output file (-o <file>).
ifdef PROFILE_OUTPUT
PROFILE_OUTPUT_OPTS=-o $(PROFILE_OUTPUT)
endif

# Default sort key for cProfile statistics (see `python -m cProfile -s`).
ifndef PROFILE_SORT_BY
PROFILE_SORT_BY=cumtime
endif

# PROFILE=1 wraps the test run in cProfile; profiling also forces the run
# into the foreground (overrides the DEBUG-derived default above).
ifeq ($(PROFILE),1)
PYTHON_PROFILE_OPTS=-m cProfile $(PROFILE_OUTPUT_OPTS) -s $(PROFILE_SORT_BY)
FORCE_FOREGROUND=1
endif

# Fail the build if vpp processes are already running: pre-existing
# instances would interfere with the tests, so list them and stop.
verify-no-running-vpp:
	@if [ "$(VPP_PIDS)" != "" ]; then \
		echo; \
		echo "*** Existing vpp processes detected (PID(s): $(VPP_PIDS)). Running tests under these conditions is not supported. ***"; \
		echo; \
		ps -fp $(VPP_PIDS);\
		echo; \
		false; \
	fi

# Options forwarded to the unittest runner.
UNITTEST_EXTRA_OPTS=
UNITTEST_FAILFAST_OPTS=

# FAILFAST=1: stop on the first test failure (-f).
# BUGFIX: record the flag in UNITTEST_FAILFAST_OPTS (previously it was only
# placed in UNITTEST_EXTRA_OPTS, and the EXTERN_TESTS branch below — which
# rebuilds UNITTEST_EXTRA_OPTS from UNITTEST_FAILFAST_OPTS — silently
# dropped it, so FAILFAST had no effect for external test runs).
ifeq ($(FAILFAST),1)
UNITTEST_FAILFAST_OPTS=-f
UNITTEST_EXTRA_OPTS=$(UNITTEST_FAILFAST_OPTS)
endif

# EXTERN_TESTS: also discover tests in the given out-of-tree directory
# (-d <dir>), keeping whatever failfast setting was chosen above.
ifneq ($(EXTERN_TESTS),)
UNITTEST_EXTRA_OPTS=$(UNITTEST_FAILFAST_OPTS) -d $(EXTERN_TESTS)
endif

# TEST_DEBUG=1: keep a separate python prefix for debug runs and add
# memory-debugging helpers (objgraph, pympler) to the dependency set.
ifeq ($(TEST_DEBUG),1)
VPP_PYTHON_PREFIX:=$(VPP_PYTHON_PREFIX)/debug
PYTHON_EXTRA_DEPENDS=objgraph pympler
else
PYTHON_EXTRA_DEPENDS=
endif

# Virtualenv location and the python packages installed into it.
PYTHON_VENV_PATH=$(VPP_PYTHON_PREFIX)/virtualenv
PYTHON_DEPENDS=$(PYTHON_EXTRA_DEPENDS) psutil faulthandler six scapy==2.4.0 pexpect cryptography subprocess32 cffi git+https://github.com/vpp-dev/py-lispnetworking
# Locate the venv's site-packages dir (used as the scapy patch target).
SCAPY_SOURCE=$(shell find $(PYTHON_VENV_PATH) -name site-packages)
BUILD_COV_DIR=$(BR)/test-cov

# Stamp files marking completed setup phases, so each runs only once.
GET_PIP_SCRIPT=$(VPP_PYTHON_PREFIX)/get-pip.py
PIP_INSTALL_DONE=$(VPP_PYTHON_PREFIX)/pip-install.done
PIP_PATCH_DONE=$(VPP_PYTHON_PREFIX)/pip-patch.done
PAPI_INSTALL_DONE=$(VPP_PYTHON_PREFIX)/papi-install.done

PAPI_INSTALL_FLAGS=$(PIP_INSTALL_DONE) $(PIP_PATCH_DONE) $(PAPI_INSTALL_DONE)

# Interpreter for the virtualenv: python2.7 unless PYTHON overrides it.
ifeq ($(PYTHON),)
PYTHON_INTERP=python2.7
else
PYTHON_INTERP=$(PYTHON)
endif

# Download the pip bootstrap script into the python prefix.
# NOTE(review): bootstrap.pypa.io now serves a python-3-only script at this
# unversioned URL; a versioned URL may be needed for python2.7 — confirm.
$(GET_PIP_SCRIPT):
	@mkdir -p $(VPP_PYTHON_PREFIX)
	@bash -c "cd $(VPP_PYTHON_PREFIX) && curl -O https://bootstrap.pypa.io/get-pip.py"

# Create the virtualenv, bootstrap pip inside it, and install all test
# dependencies; the stamp file prevents repeating this slow step.
$(PIP_INSTALL_DONE): $(GET_PIP_SCRIPT)
	@virtualenv $(PYTHON_VENV_PATH) -p $(PYTHON_INTERP)
	@bash -c "source $(PYTHON_VENV_PATH)/bin/activate && python $(GET_PIP_SCRIPT)"
	@bash -c "source $(PYTHON_VENV_PATH)/bin/activate && pip install $(PYTHON_DEPENDS)"
	@touch $@

# Apply local patches on top of the installed scapy-2.4 tree inside the
# virtualenv's site-packages.
$(PIP_PATCH_DONE): $(PIP_INSTALL_DONE)
	@echo --- patching ---
	@sleep 1 # Ensure python recompiles patched *.py files -> *.pyc
	for f in $(CURDIR)/patches/scapy-2.4/*.patch ; do \
		echo Applying patch: $$(basename $$f) ; \
		patch -p1 -d $(SCAPY_SOURCE) < $$f ; \
	done
	@touch $@

# Install the in-tree VPP Python API (PAPI) into the virtualenv.
$(PAPI_INSTALL_DONE): $(PIP_PATCH_DONE)
	@bash -c "source $(PYTHON_VENV_PATH)/bin/activate && cd $(WS_ROOT)/src/vpp-api/python && python setup.py install"
	@touch $@

define retest-func
@env FORCE_FOREGROUND=$(FORCE_FOREGROUND) VPP_TEST_FAILED_DIR=$(VP
================================
 Envisioning information by PAL
================================

Introduction
------------

This document describes possible improvements in data presentation provided by
PAL for the `Report <https://docs.fd.io/csit/master/report/>`_ and the
`Trending <https://docs.fd.io/csit/master/trending/>`_.

You can generate a standalone html version of this document using e.g.
rst2html5 tool:

.. code:: bash

    rst2html5 --stylesheet graphs_improvements.css graphs_improvements.rst > graphs_improvements.html

**Modifications of existing graphs**

- `Speedup Multi-core`_
- `Packet Throughput`_
- `Packet Latency`_
- `HTTP-TCP Performance`_

**New graphs to be added**

- `Comparison between releases`_
- `Comparison between processor architectures`_
- `Comparison between 2-node and 3-node topologies`_
- `Comparison between different physical testbed instances`_
- `Comparison between NICs`_
- `Other comparisons`_

**Export of static images**

- low priority
- make possible to `export static images`_ which are available via link on the
  web page.
- vector formats (svg, pdf) are preferred

Priorities
----------

**Target CSIT-18.10**

- `Speedup Multi-core`_
- `Packet Throughput`_

**Nice to have in CSIT-18.10**

.. note::

    Only if the above is done and target CSIT-18.10 is on time; otherwise
    next release.

- `Packet Latency`_
- `HTTP-TCP Performance`_

Modifications of existing graphs
--------------------------------

The proposed modifications include the changes in:

- the layout of the graphs,
- the data and way how it is presented,
- the test cases presented in the graphs.

The first two points are described below, the last one will be added later as a
separate chapter.

.. _Speedup Multi-core:

Speedup Multi-core
``````````````````

The "Speedup Multicore" graph will display the measured data together with
perfect values calculated as multiples of the best value measured using one
core. The relative difference between measured and perfect values will be
displayed in the hover next to each data point.

.. image:: pic/graph-speedup.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Speedup Multi-core" not found.

**Description:**

*Data displayed:*

- one or more data series from the same area, keep the number of displayed
  data series as low as possible (max 6)
- x-axis: number of cores
- y-axis: throughput (measured and perfect) [Mpps], linear scale, beginning
  with 0
- hover information: Throughput [Mpps], Speedup [1], Relative difference between
  measured and perfect values [%], Perfect Throughput [Mpps]
- Limits of ethernet links, NICs and PCIe. See `Physical performance limits`_.

*Layout:*

- plot type: lines with data points (plotly.graph_objs.Scatter)
- data series format:
    - measured: solid line with data points
    - perfect: dashed line with data points, the same color as "measured"
- title: "Speedup Multi-core: <area, scaling, features, ...>",
  top, centered, font size 18; configurable in specification file: visible /
  hidden, text
- x-axis: integers, starting with 1 (core), linear, font size 16, bottom
- x-axis label: "Number of cores [qty]", bottom, centered, font size 16
- y-axis: float, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left
- legend: list of presented test cases, bottom, left, font size 16; the order
  of displayed tests is configurable in the specification file
- annotation: text: "dashed: perfect<br>solid: measured", top, left,
  font size 16

.. _Packet Throughput:

Packet Throughput
`````````````````

The "Packet Throughput" graph will display the measured data using
statistical box graph. Each data point is constructed from 10 samples.
The statistical data are displayed as hover information.

.. image:: pic/graph-throughput.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Packet Throughput" not found.

**Description:**

*Data displayed:*

- one or more data points from the same area, keep the number of displayed
  data points as low as possible (max 6)
- x-axis: indexed test cases
- y-axis: throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (min, lower fence, q1, median, q3,
  higher fence, max), test case name

*Layout:*

- plot type: statistical box (plotly.graph_objs.Box)
- data series format: box
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18; configurable in specification file: visible /
  hidden, text
- x-axis: integers, starting with 1, linear, font size 16, bottom; the order
  of displayed tests is configurable in the specification file
- x-axis label: "Indices of Test Cases [Index]", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left
- legend: "Indexed Test Cases [idx]", bottom, left, font size 16

.. _Packet Latency:

Packet Latency
``````````````

The "Packet Latency" graph will display the measured data using
statistical box graph. Each data point is constructed from 10 samples.
The statistical data are displayed as hover information.

.. image:: pic/graph-latency.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Packet Latency" not found.

**Description:**

*Data displayed:*

- one or more data points from the same area, keep the number of displayed
  data points as low as possible (max 6)
- x-axis: data flow directions
- y-axis: latency min/avg/max [uSec], linear scale, beginning with 0
- hover information: statistical data (min, avg, max), test case name, direction

*Layout:*

- plot type: scatter with errors (plotly.graph_objs.Scatter)
- data series format: data point with min and max values
- title: "Packet Latency: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18; configurable in specification file: visible /
  hidden, text
- x-axis: text, font size 16, bottom; the order of displayed tests is
  configurable in the specification file
- x-axis label: "Direction", bottom, centered
- y-axis: integers, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Latency min/avg/max [uSec]", middle, left
- legend: "Indexed Test Cases [idx]", bottom, left, font size 16

.. _HTTP-TCP Performance:

HTTP/TCP Performance
````````````````````

The "HTTP/TCP Performance" graph will display the measured data using
statistical box graph separately for "Connections per second" and "Requests per
second". Each data point is constructed from 10 samples. The statistical data
are displayed as hover information.

.. image:: pic/graph-http-cps.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "HTTP/TCP Performance" not found.

.. image:: pic/graph-http-rps.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "HTTP/TCP Performance" not found.

**Description:**

*Data displayed:*

- requests / connections per second, the same tests configured for 1, 2 and
  4 cores (3 data points in each graph)
- x-axis: indexed test cases
- y-axis: requests/connections per second, linear scale, beginning with 0
- hover information: statistical data (min, lower fence, q1, median, q3,
  higher fence, max), test case name

*Layout:*

- plot type: statistical box (plotly.graph_objs.Box)
- data series format: box
- title: "VPP HTTP Server Performance", top, centered, font size 18
- x-axis: integers, font size 16, bottom
- x-axis label: "Indices of Test Cases [Index]", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Connections per second [cps]", "Requests per second [rps]",
  top, left
- legend: "Indexed Test Cases [idx]", bottom, left, font size 16

New graphs to be added
----------------------

- *Comparison between releases*

    - compare MRR, NDR, PDR between releases
    - use as many releases as available

- *Comparison between processor architectures*

    - compare MRR, NDR, PDR between processor architectures
    - HSW vs SKX (vs ARM when available)

- *Comparison between 2-node and 3-node topologies*

    - compare MRR, NDR, PDR between topologies
    - 3n-skx vs 2n-skx

- *Comparison between different physical testbed instances*

    - compare the results of the same test (MRR, NDR, PDR) run on different
      instances of the same testbed, e.g. HSW
    - HSW vs HSW, SKX vs SKX

- *Comparison between NICs*

    - compare the results of the same test (MRR, NDR, PDR) run on different NICs
      but on the same instance of a physical testbed.
    - x520 vs x710 vs xl710 on HSW
    - x710 vs xxv710 on SKX

- *Other comparisons*

.. note::

    - Partially based on the existing tables in the Report
    - Only selected TCs

.. _Comparison between releases:

Comparison between releases
```````````````````````````

This graph will compare the results of the same test from different releases.
One graph can present the data from one or more tests logically grouped. See
`Grouping of tests in graphs`_ for more information.
Each data point is constructed from 10 samples. The statistical data are
displayed as hover information.

.. image:: pic/graph_cmp_releases.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Comparison between releases" not found.

**Description:**

*Data displayed:*

- data: packet throughput
- x-axis: release
- y-axis: packet throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (median, stdev), test case name, release

*Layout:*

- plot type: scatter with line
- data series format: line with markers
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18
- x-axis: strings, font size 16, bottom
- x-axis label: "Release", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, bottom, font size 16
- y-axis label: "Packet Throughput [Mpps]", middle, left, font size 16
- legend: "Test Cases", bottom, left, font size 16

.. _Comparison between processor architectures:

Comparison between processor architectures
``````````````````````````````````````````

This graph will compare the results of the same test from the same release run
on the different processor architectures (HSW, SKX, later ARM).
One graph can present the data from one or more tests logically grouped. See
`Grouping of tests in graphs`_ for more information.
Each data point is constructed from 10 samples. The statistical data are
displayed as hover information.

.. image:: pic/graph_cmp_arch.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Comparison between processor architectures" not found.

**Description:**

*Data displayed:*

- data: packet throughput
- x-axis: processor architecture
- y-axis: throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (median, stdev), test case name, processor
  architecture

*Layout:*

- plot type: scatter with line
- data series format: line with markers
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18
- x-axis: strings, font size 16, bottom, centered
- x-axis label: "Processor architecture", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left
- legend: "Test cases", bottom, left, font size 16

.. _Comparison between 2-node and 3-node topologies:

Comparison between 2-node and 3-node topologies
```````````````````````````````````````````````

This graph will compare the results of the same test from the same release run
on the same processor architecture but different topologies (3n-skx, 2n-skx).
One graph can present the data from one or more tests logically grouped. See
`Grouping of tests in graphs`_ for more information.
Each data point is constructed from 10 samples. The statistical data are
displayed as hover information.

.. image:: pic/graph_cmp_topo.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Comparison between 2-node and 3-node topologies" not found.

**Description:**

*Data displayed:*

- data: packet throughput
- x-axis: topology
- y-axis: throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (median, stdev), test case name, topology

*Layout:*

- plot type: scatter with line
- data series format: line with markers
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18
- x-axis: strings, font size 16, bottom, centered
- x-axis label: "Topology", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left, font size 16
- legend: "Test cases", bottom, left, font size 16

.. _Comparison between different physical testbed instances:

Comparison between different physical testbed instances
```````````````````````````````````````````````````````

This graph will compare the results of the same test from the same release run
on the same processor architecture, the same topology but different physical
testbed instances.
One graph can present the data from one or more tests logically grouped. See
`Grouping of tests in graphs`_ for more information.
Each data point is constructed from 10 samples. The statistical data are
displayed as hover information.


.. image:: pic/graph_cmp_testbed.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Comparison between different physical testbed instances" not
          found.

**Description:**

*Data displayed:*

- data: packet throughput
- x-axis: physical testbed instances
- y-axis: throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (median, stdev), test case name, physical
  testbed instance

*Layout:*

- plot type: scatter with line
- data series format: line with markers
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18
- x-axis: strings, font size 16, bottom, centered
- x-axis label: "Physical Testbed Instance", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left, font size 16
- legend: "Test cases", bottom, left, font size 16

.. _Comparison between NICs:

Comparison between NICs
```````````````````````

This graph will compare the results of the same test from the same release run
on the same processor architecture, the same topology but different NICs.
One graph can present the data from one or more tests logically grouped. See
`Grouping of tests in graphs`_ for more information.
Each data point is constructed from 10 samples. The statistical data are
displayed as hover information.

.. image:: pic/graph_cmp_nics.svg
    :width: 800 px
    :scale: 50 %
    :align: center
    :alt: Graph "Comparison between NICs" not found.

**Description:**

*Data displayed:*

- data: packet throughput
- x-axis: NICs
- y-axis: packet throughput [Mpps], linear scale, beginning with 0
- hover information: statistical data (median, stdev), test case name, NIC

*Layout:*

- plot type: scatter with line
- data series format: line with markers
- title: "Packet Throughput: <area, scaling, features, framesize, cores, ...>",
  top, centered, font size 18
- x-axis: strings, font size 16, bottom
- x-axis label: "NIC", bottom, centered, font size 16
- y-axis: floats, starting with 0, dynamic range, linear, font size 16, left
- y-axis label: "Packet Throughput [Mpps]", middle, left, font size 16
- legend: "Test cases", bottom, left, font size 16

.. _Other comparisons:

Other comparisons
`````````````````

**Other tests results comparisons**

- compare packet throughput for vhost vs memif

**Other views on collected data**

- per `Vratko Polak email on csit-dev <https://lists.fd.io/g/csit-dev/message/3008>`_.

.. _Grouping of tests in graphs:

Grouping of tests in graphs
---------------------------

A graph can present results of one or more tests. The tests are grouped
according to the defined criteria. In the ideal case, all graphs use the same
groups of tests.

The grouping of tests is described in a separate document.

.. TODO: [MK], [TF]: Create the document.
.. TODO: [TF]: Add the link.
.. TODO: [TF]: Remove/edit the next paragraph when the document is ready.

**Example of data grouping:**

- ip4: ip4base, ip4scale20k, ip4scale200k, ip4scale2m
    - data presented in this order from left to right
- ip6: similar to ip4
- l2bd: similar to ip4.

.. _Sorting of tests presented in a graph:

Sorting of tests presented in a graph
-------------------------------------

It is possible to specify the order of tests (suites) on the x-axis presented in
a graph:

- `Packet Throughput`_
- `Packet Latency`_

It is possible to specify the order of tests (suites) in the legend presented in
a graph:

- `Speedup Multi-core`_

In both cases the order is defined in the specification file for each plot
separately, e.g.:

.. code:: yaml

    -
      type: "plot"
      <other parameters>
      sort:
      - "IP4BASE"
      - "FIB_20K"
      - "FIB_200K"
      - "FIB_2M"

The sorting is based on tags. If more than one test has the same tag, only the
first one is taken. The remaining tests and the tests without listed tags are
placed at the end of the list in random order.

.. _export static images:

Export of static images
-----------------------

Not implemented yet. For more information see:

- `Plotly: Static image export <https://plot.ly/python/static-image-export/>`_
- preferred vector formats (svg, pdf)
- requirements:
    - plotly-orca
        - `Orca <https://github.com/plotly/orca>`_
        - `Orca releases <https://github.com/plotly/orca/releases>`_
        - `Orca management in Python <https://plot.ly/python/orca-management/>`_
    - psutil

.. _Physical performance limits:

Physical performance limits
---------------------------

+-----------------+----------------+
| Ethernet links  | pps @64B       |
+=================+================+
|  10ge           |  14,880,952.38 |
+-----------------+----------------+
|  25ge           |  37,202,380.95 |
+-----------------+----------------+
|  40ge           |  59,523,809.52 |
+-----------------+----------------+
|  100ge          | 148,809,523.81 |
+-----------------+----------------+


+-----------------+----------------+
| Ethernet links  | bps            |
+=================+================+
| 64B             |                |
+-----------------+----------------+
| IMIX            |                |
+-----------------+----------------+
| 1518B           |                |
+-----------------+----------------+
| 9000B           |                |
+-----------------+----------------+


+-----------------+----------------+
| NIC             | pps @64B       |
+=================+================+
| x520            | 24,460,000     |
+-----------------+----------------+
| x710            | 35,800,000     |
+-----------------+----------------+
| xxv710          | 35,800,000     |
+-----------------+----------------+
| xl710           | 35,800,000     |
+-----------------+----------------+


+-----------------+----------------+
| NIC             | bw ??B         |
+=================+================+
| x520            |                |
+-----------------+----------------+
| x710            |                |
+-----------------+----------------+
| xxv710          |                |
+-----------------+----------------+
| xl710           |                |
+-----------------+----------------+


+-----------------+----------------+
| PCIe            | bps            |
+=================+================+
| PCIe Gen3 x8    | 50,000,000,000 |
+-----------------+----------------+
| PCIe Gen3 x16   | 100,000,000,000|
+-----------------+----------------+


+-----------------+----------------+
| PCIe            | pps @64B       |
+=================+================+
| PCIe Gen3 x8    |  74,404,761.90 |
+-----------------+----------------+
| PCIe Gen3 x16   | 148,809,523.81 |
+-----------------+----------------+