Diffstat (limited to 'resources/tools')
-rw-r--r--resources/tools/presentation/conf_cpta/conf.py103
-rw-r--r--resources/tools/presentation/doc/pal_lld.rst40
-rw-r--r--resources/tools/presentation/generator_CPTA.py429
-rw-r--r--resources/tools/presentation/generator_plots.py2
-rw-r--r--resources/tools/presentation/generator_report.py73
-rw-r--r--resources/tools/presentation/generator_tables.py2
-rw-r--r--resources/tools/presentation/input_data_files.py91
-rw-r--r--resources/tools/presentation/input_data_parser.py114
-rw-r--r--resources/tools/presentation/pal.py28
-rwxr-xr-xresources/tools/presentation/run_cpta.sh37
-rwxr-xr-xresources/tools/presentation/run_report.sh3
-rw-r--r--resources/tools/presentation/specification_CPTA.yaml400
-rw-r--r--resources/tools/presentation/specification_parser.py87
-rw-r--r--resources/tools/presentation/utils.py125
14 files changed, 1380 insertions, 154 deletions
diff --git a/resources/tools/presentation/conf_cpta/conf.py b/resources/tools/presentation/conf_cpta/conf.py
new file mode 100644
index 0000000000..9b6e5f3bc1
--- /dev/null
+++ b/resources/tools/presentation/conf_cpta/conf.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+#
+# CSIT report documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['sphinxcontrib.programoutput',
+ 'sphinx.ext.ifconfig']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+#
+source_suffix = ['.rst', '.md']
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'FD.io CSIT'
+copyright = u'2018, FD.io'
+author = u'FD.io CSIT'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+#version = u''
+# The full version, including alpha/beta/rc tags.
+#release = u''
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_theme_path = ['env/lib/python2.7/site-packages/sphinx_rtd_theme']
+
+# html_static_path = ['_build/_static']
+html_static_path = ['../_tmp/src/_static']
+
+html_context = {
+ 'css_files': [
+ '_static/theme_overrides.css', # overrides for wide tables in RTD theme
+ ],
+ }
diff --git a/resources/tools/presentation/doc/pal_lld.rst b/resources/tools/presentation/doc/pal_lld.rst
index 7ca3ad43d5..2e119fad82 100644
--- a/resources/tools/presentation/doc/pal_lld.rst
+++ b/resources/tools/presentation/doc/pal_lld.rst
@@ -1368,6 +1368,46 @@ of an element is required, only a new algorithm needs to be implemented
and integrated.
+Continuous Performance Measurements and Trending
+------------------------------------------------
+
+Performance analysis and trending execution sequence:
+`````````````````````````````````````````````````````
+
+1. Triggered at completion of a Performance Measurements and Archiving (PMA)
+   job.
+
+   a. Periodic and Gerrit triggers are supported as well.
+
+2. Download the RF output.xml from the triggering PMA job.
+3. Parse out the test results listed in the PAL specification file.
+4. Read the specified amount of PMA historical data from Nexus.
+5. Calculate the specified statistical metrics (see the next section).
+6. Evaluate the latest results against the historical metrics, quantify the
+   relative change and, based on the defined criteria, set the result to Pass
+   (no change or progression) or Fail (regression).
+7. Add the new data to the historical data.
+8. Generate a new set of trend analysis summary and drill-down graphs.
+9. Archive the latest RF output.xml to Nexus for future analysis.
+10. Publish the trend analysis graphs in HTML format on https://docs.fd.io/.
+
+Parameters to specify:
+``````````````````````
+
+- job to be monitored - the Jenkins job whose results are used as input data
+  for the trending;
+- number of builds used for the trending plot(s) - an integer greater than
+  zero, or zero for all available builds;
+- tests of interest - a list of tests whose results are used for the trending;
+- window size for the moving median.
+
+*Example (a sketch assembled from the "cpta" section of
+specification_CPTA.yaml introduced by this patch):*
+
+::
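+
+    - title: "VPP 1T1C L2 64B Packet Throughput - {period} Trending"
+      output-file-name: "l2"
+      data: "plot-performance-trending"
+      filter: "'MRR' and '64B' and '1T1C' and ('L2BDMACLRN' or 'L2XCFWD')"
+      parameters:
+        - "result"
+        - "name"
+      periods:
+        - 1
+        - 5
+        - 30
+      layout: "plot-cpta"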
+
+
API
---
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
new file mode 100644
index 0000000000..c1b14f1f55
--- /dev/null
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -0,0 +1,429 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generation of Continuous Performance Trending and Analysis.
+"""
+
+import datetime
+import logging
+import plotly.offline as ploff
+import plotly.graph_objs as plgo
+import numpy as np
+import pandas as pd
+
+from collections import OrderedDict
+from utils import find_outliers, archive_input_data, execute_command
+
+
+# Command to build the html format of the report
+HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
+ '-b html -E ' \
+ '-t html ' \
+ '{working_dir} ' \
+ '{build_dir}/'
+
+# .css file for the html format of the report
+THEME_OVERRIDES = """/* override table width restrictions */
+.wy-nav-content {
+ max-width: 1200px !important;
+}
+"""
+
+COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
+ "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
+ "Violet", "Blue", "Yellow"]
+
+
+def generate_cpta(spec, data):
+ """Generate all formats and versions of the Continuous Performance Trending
+ and Analysis.
+
+ :param spec: Specification read from the specification file.
+ :param data: Full data set.
+ :type spec: Specification
+    :type data: InputData
+    :returns: Return code of the generation process (0 = success).
+    :rtype: int
+    """
+
+ logging.info("Generating the Continuous Performance Trending and Analysis "
+ "...")
+
+ ret_code = _generate_all_charts(spec, data)
+
+ cmd = HTML_BUILDER.format(
+ date=datetime.date.today().strftime('%d-%b-%Y'),
+ working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
+ build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
+ execute_command(cmd)
+
+ with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
+ css_file:
+ css_file.write(THEME_OVERRIDES)
+
+ with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
+ css_file:
+ css_file.write(THEME_OVERRIDES)
+
+ archive_input_data(spec)
+
+ logging.info("Done.")
+
+ return ret_code
+
+
+def _select_data(in_data, period, fill_missing=False, use_first=False):
+ """Select the data from the full data set. The selection is done by picking
+ the samples depending on the period: period = 1: All, period = 2: every
+ second sample, period = 3: every third sample ...
+
+ :param in_data: Full set of data.
+ :param period: Sampling period.
+ :param fill_missing: If the chosen sample is missing in the full set, its
+ nearest neighbour is used.
+ :param use_first: Use the first sample even though it is not chosen.
+ :type in_data: OrderedDict
+ :type period: int
+ :type fill_missing: bool
+ :type use_first: bool
+ :returns: Reduced data.
+ :rtype: OrderedDict
+ """
+
+ first_idx = min(in_data.keys())
+ last_idx = max(in_data.keys())
+
+ idx = last_idx
+ data_dict = dict()
+ if use_first:
+ data_dict[first_idx] = in_data[first_idx]
+ while idx >= first_idx:
+ data = in_data.get(idx, None)
+ if data is None:
+ if fill_missing:
+ threshold = int(round(idx - period / 2)) + 1 - period % 2
+ idx_low = first_idx if threshold < first_idx else threshold
+ threshold = int(round(idx + period / 2))
+ idx_high = last_idx if threshold > last_idx else threshold
+
+ flag_l = True
+ flag_h = True
+ idx_lst = list()
+ inc = 1
+ while flag_l or flag_h:
+ if idx + inc > idx_high:
+ flag_h = False
+ else:
+ idx_lst.append(idx + inc)
+ if idx - inc < idx_low:
+ flag_l = False
+ else:
+ idx_lst.append(idx - inc)
+ inc += 1
+
+ for i in idx_lst:
+ if i in in_data.keys():
+ data_dict[i] = in_data[i]
+ break
+ else:
+ data_dict[idx] = data
+ idx -= period
+
+ return OrderedDict(sorted(data_dict.items(), key=lambda t: t[0]))
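+
+# Example (illustrative): with
+# in_data = OrderedDict([(1, 10.0), (2, 20.0), (3, 30.0), (4, 40.0), (5, 50.0)])
+# and period=2, the selection starts from the last index (5) and keeps every
+# second sample, so _select_data() returns
+# OrderedDict([(1, 10.0), (3, 30.0), (5, 50.0)]).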
+
+
+def _evaluate_results(in_data, trimmed_data, window=10):
+ """Evaluates if the sample value is regress, normal or progress compared to
+ previous data within the window.
+ We use the intervals defined as:
+ - regress: less than median - 3 * stdev
+ - normal: between median - 3 * stdev and median + 3 * stdev
+ - progress: more than median + 3 * stdev
+
+ :param in_data: Full data set.
+ :param trimmed_data: Full data set without the outliers.
+ :param window: Window size used to calculate moving median and moving stdev.
+ :type in_data: pandas.Series
+ :type trimmed_data: pandas.Series
+ :type window: int
+ :returns: Evaluated results.
+ :rtype: list
+ """
+
+ if len(in_data) > 2:
+ win_size = in_data.size if in_data.size < window else window
+ results = [0.0, ] * win_size
+ median = in_data.rolling(window=win_size).median()
+ stdev_t = trimmed_data.rolling(window=win_size, min_periods=2).std()
+ m_vals = median.values
+ s_vals = stdev_t.values
+ d_vals = in_data.values
+ for day in range(win_size, in_data.size):
+ if np.isnan(m_vals[day - 1]) or np.isnan(s_vals[day - 1]):
+ results.append(0.0)
+ elif d_vals[day] < (m_vals[day - 1] - 3 * s_vals[day - 1]):
+ results.append(0.33)
+ elif (m_vals[day - 1] - 3 * s_vals[day - 1]) <= d_vals[day] <= \
+ (m_vals[day - 1] + 3 * s_vals[day - 1]):
+ results.append(0.66)
+ else:
+ results.append(1.0)
+ else:
+ results = [0.0, ]
+ try:
+ median = np.median(in_data)
+ stdev = np.std(in_data)
+ if in_data.values[-1] < (median - 3 * stdev):
+ results.append(0.33)
+ elif (median - 3 * stdev) <= in_data.values[-1] <= (
+ median + 3 * stdev):
+ results.append(0.66)
+ else:
+ results.append(1.0)
+ except TypeError:
+ results.append(None)
+ return results
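+
+# Example (illustrative): with a moving median of 10.0 Mpps and a moving stdev
+# of 0.2 Mpps, the thresholds are 9.4 and 10.6 Mpps; a new sample of 9.0 Mpps
+# is classified as 0.33 (regression), 10.1 Mpps as 0.66 (normal) and 11.0 Mpps
+# as 1.0 (progression).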
+
+
+def _generate_trending_traces(in_data, period, moving_win_size=10,
+ fill_missing=True, use_first=False,
+ show_moving_median=True, name="", color=""):
+ """Generate the trending traces:
+ - samples,
+ - moving median (trending plot)
+ - outliers, regress, progress
+
+ :param in_data: Full data set.
+ :param period: Sampling period.
+ :param moving_win_size: Window size.
+ :param fill_missing: If the chosen sample is missing in the full set, its
+ nearest neighbour is used.
+ :param use_first: Use the first sample even though it is not chosen.
+ :param show_moving_median: Show moving median (trending plot).
+ :param name: Name of the plot
+ :param color: Name of the color for the plot.
+ :type in_data: OrderedDict
+ :type period: int
+ :type moving_win_size: int
+ :type fill_missing: bool
+ :type use_first: bool
+ :type show_moving_median: bool
+ :type name: str
+ :type color: str
+ :returns: Generated traces (list) and the evaluated result (float).
+ :rtype: tuple(traces, result)
+ """
+
+ if period > 1:
+ in_data = _select_data(in_data, period,
+ fill_missing=fill_missing,
+ use_first=use_first)
+
+ data_x = [key for key in in_data.keys()]
+ data_y = [val for val in in_data.values()]
+ data_pd = pd.Series(data_y, index=data_x)
+
+ t_data, outliers = find_outliers(data_pd)
+
+ results = _evaluate_results(data_pd, t_data, window=moving_win_size)
+
+ anomalies = pd.Series()
+ anomalies_res = list()
+ for idx, item in enumerate(in_data.items()):
+ item_pd = pd.Series([item[1], ], index=[item[0], ])
+ if item[0] in outliers.keys():
+ anomalies = anomalies.append(item_pd)
+ anomalies_res.append(0.0)
+ elif results[idx] in (0.33, 1.0):
+ anomalies = anomalies.append(item_pd)
+ anomalies_res.append(results[idx])
+ anomalies_res.extend([0.0, 0.33, 0.66, 1.0])
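+    # The four appended sentinel values (0.0, 0.33, 0.66, 1.0) keep the marker
+    # colorscale normalization spanning the full range of result classes even
+    # when some classes are absent; the surplus entries have no matching
+    # points and are not drawn.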
+
+ # Create traces
+ color_scale = [[0.00, "grey"],
+ [0.25, "grey"],
+ [0.25, "red"],
+ [0.50, "red"],
+ [0.50, "white"],
+ [0.75, "white"],
+ [0.75, "green"],
+ [1.00, "green"]]
+
+ trace_samples = plgo.Scatter(
+ x=data_x,
+ y=data_y,
+ mode='markers',
+ line={
+ "width": 1
+ },
+ name="{name}-thput".format(name=name),
+ marker={
+ "size": 5,
+ "color": color,
+ "symbol": "circle",
+ },
+ )
+ traces = [trace_samples, ]
+
+ trace_anomalies = plgo.Scatter(
+ x=anomalies.keys(),
+ y=anomalies.values,
+ mode='markers',
+ hoverinfo="none",
+ showlegend=False,
+ legendgroup=name,
+ name="{name}: outliers".format(name=name),
+ marker={
+ "size": 15,
+ "symbol": "circle-open",
+ "color": anomalies_res,
+ "colorscale": color_scale,
+ "showscale": True,
+
+ "colorbar": {
+ "y": 0.5,
+ "len": 0.8,
+ "title": "Results Clasification",
+ "titleside": 'right',
+ "titlefont": {
+ "size": 14
+ },
+ "tickmode": 'array',
+ "tickvals": [0.125, 0.375, 0.625, 0.875],
+ "ticktext": ["Outlier", "Regress", "Normal", "Progress"],
+ "ticks": 'outside',
+ "ticklen": 0,
+ "tickangle": -90,
+ "thickness": 10
+ }
+ }
+ )
+ traces.append(trace_anomalies)
+
+ if show_moving_median:
+ data_mean_y = pd.Series(data_y).rolling(
+ window=moving_win_size).median()
+ trace_median = plgo.Scatter(
+ x=data_x,
+ y=data_mean_y,
+ mode='lines',
+ line={
+ "shape": "spline",
+ "width": 1,
+ "color": color,
+ },
+            name='{name}-trend'.format(name=name)
+ )
+ traces.append(trace_median)
+
+ return traces, results[-1]
+
+
+def _generate_chart(traces, layout, file_name):
+ """Generates the whole chart using pre-generated traces.
+
+ :param traces: Traces for the chart.
+ :param layout: Layout of the chart.
+ :param file_name: File name for the generated chart.
+ :type traces: list
+ :type layout: dict
+ :type file_name: str
+ """
+
+ # Create plot
+ logging.info(" Writing the file '{0}' ...".format(file_name))
+ plpl = plgo.Figure(data=traces, layout=layout)
+ ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
+
+
+def _generate_all_charts(spec, input_data):
+ """Generate all charts specified in the specification file.
+
+ :param spec: Specification.
+ :param input_data: Full data set.
+ :type spec: Specification
+    :type input_data: InputData
+    :returns: 0 if no regression or parsing failure is detected in the
+        evaluated results, 1 otherwise.
+    :rtype: int
+    """
+
+ results = list()
+ for chart in spec.cpta["plots"]:
+ logging.info(" Generating the chart '{0}' ...".
+ format(chart.get("title", "")))
+
+ # Transform the data
+ data = input_data.filter_data(chart, continue_on_error=True)
+ if data is None:
+ logging.error("No data.")
+ return
+
+ chart_data = dict()
+ for job in data:
+ for idx, build in job.items():
+ for test in build:
+ if chart_data.get(test["name"], None) is None:
+ chart_data[test["name"]] = OrderedDict()
+ try:
+ chart_data[test["name"]][int(idx)] = \
+ test["result"]["throughput"]
+ except (KeyError, TypeError):
+ chart_data[test["name"]][int(idx)] = None
+
+ for period in chart["periods"]:
+ # Generate traces:
+ traces = list()
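+            # Moving-window size by sampling period: daily (period 1) uses
+            # 10 samples, weekly (period < 20) uses 5, monthly uses 3.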
+ win_size = 10 if period == 1 else 5 if period < 20 else 3
+ idx = 0
+ for test_name, test_data in chart_data.items():
+ if not test_data:
+ logging.warning("No data for the test '{0}'".
+ format(test_name))
+ continue
+ trace, result = _generate_trending_traces(
+ test_data,
+ period=period,
+ moving_win_size=win_size,
+ fill_missing=True,
+ use_first=False,
+ name='-'.join(test_name.split('-')[3:-1]),
+ color=COLORS[idx])
+ traces.extend(trace)
+ results.append(result)
+ idx += 1
+
+ # Generate the chart:
+ period_name = "Daily" if period == 1 else \
+ "Weekly" if period < 20 else "Monthly"
+ chart["layout"]["title"] = chart["title"].format(period=period_name)
+ _generate_chart(traces,
+ chart["layout"],
+ file_name="{0}-{1}-{2}{3}".format(
+ spec.cpta["output-file"],
+ chart["output-file-name"],
+ period,
+ spec.cpta["output-file-type"]))
+
+ logging.info(" Done.")
+
+ result = "PASS"
+ for item in results:
+ if item is None:
+ result = "FAIL"
+ break
+ if item == 0.66 and result == "PASS":
+ result = "PASS"
+ elif item == 0.33 or item == 0.0:
+ result = "FAIL"
+    logging.info("Evaluated results: {0}".format(results))
+    logging.info("Overall result: {0}".format(result))
+ if result == "FAIL":
+ return 1
+ else:
+ return 0
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index ac77b3d425..b7fd420aa2 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -19,6 +19,7 @@ import logging
import pandas as pd
import plotly.offline as ploff
import plotly.graph_objs as plgo
+
from plotly.exceptions import PlotlyError
from utils import mean
@@ -371,7 +372,6 @@ def plot_http_server_performance_box(plot, input_data):
y=df[col],
name=name,
**plot["traces"]))
-
try:
# Create plot
plpl = plgo.Figure(data=traces, layout=plot["layout"])
diff --git a/resources/tools/presentation/generator_report.py b/resources/tools/presentation/generator_report.py
index cf8a8d1675..55ac76bd1d 100644
--- a/resources/tools/presentation/generator_report.py
+++ b/resources/tools/presentation/generator_report.py
@@ -22,7 +22,7 @@ from os import makedirs, environ
from os.path import isdir
from shutil import copy, Error, make_archive
-from utils import get_files
+from utils import get_files, execute_command, archive_input_data
from errors import PresentationError
@@ -82,7 +82,7 @@ def generate_report(release, spec):
"pdf": generate_pdf_report
}
- for report_format, versions in spec.output.items():
+ for report_format, versions in spec.output["format"].items():
report[report_format](release, spec, versions)
archive_input_data(spec)
@@ -110,7 +110,7 @@ def generate_html_report(release, spec, versions):
date=datetime.date.today().strftime('%d-%b-%Y'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
- _execute_command(cmd)
+ execute_command(cmd)
with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
css_file:
@@ -146,7 +146,7 @@ def generate_pdf_report(release, spec, versions):
for plot in plots:
file_name = "{0}".format(plot.rsplit(".", 1)[0])
cmd = convert_plots.format(html=plot, pdf=file_name)
- _execute_command(cmd)
+ execute_command(cmd)
# Generate the LaTeX documentation
build_dir = spec.environment["paths"]["DIR[BUILD,LATEX]"]
@@ -155,7 +155,7 @@ def generate_pdf_report(release, spec, versions):
date=datetime.date.today().strftime('%d-%b-%Y'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=build_dir)
- _execute_command(cmd)
+ execute_command(cmd)
# Build pdf documentation
archive_dir = spec.environment["paths"]["DIR[STATIC,ARCH]"]
@@ -174,7 +174,7 @@ def generate_pdf_report(release, spec, versions):
]
for cmd in cmds:
- _execute_command(cmd)
+ execute_command(cmd)
logging.info(" Done.")
@@ -193,64 +193,3 @@ def archive_report(spec):
base_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
logging.info(" Done.")
-
-
-def archive_input_data(spec):
- """Archive the report.
-
- :param spec: Specification read from the specification file.
- :type spec: Specification
- :raises PresentationError: If it is not possible to archive the input data.
- """
-
- logging.info(" Archiving the input data files ...")
-
- if spec.is_debug:
- extension = spec.debug["input-format"]
- else:
- extension = spec.input["file-format"]
- data_files = get_files(spec.environment["paths"]["DIR[WORKING,DATA]"],
- extension=extension)
- dst = spec.environment["paths"]["DIR[STATIC,ARCH]"]
- logging.info(" Destination: {0}".format(dst))
-
- try:
- if not isdir(dst):
- makedirs(dst)
-
- for data_file in data_files:
- logging.info(" Copying the file: {0} ...".format(data_file))
- copy(data_file, dst)
-
- except (Error, OSError) as err:
- raise PresentationError("Not possible to archive the input data.",
- str(err))
-
- logging.info(" Done.")
-
-
-def _execute_command(cmd):
- """Execute the command in a subprocess and log the stdout and stderr.
-
- :param cmd: Command to execute.
- :type cmd: str
- :returns: Return code of the executed command.
- :rtype: int
- """
-
- env = environ.copy()
- proc = subprocess.Popen(
- [cmd],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True,
- env=env)
-
- stdout, stderr = proc.communicate()
-
- logging.info(stdout)
- logging.info(stderr)
-
- if proc.returncode != 0:
- logging.error(" Command execution failed.")
- return proc.returncode
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 76254c86dd..f4fe1be174 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -22,7 +22,7 @@ import prettytable
from string import replace
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers
+from utils import mean, stdev, relative_change
def generate_tables(spec, data):
diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index 7dae834b4d..7e19478570 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -16,13 +16,14 @@ Download all data.
"""
import re
-
+import gzip
import logging
from os import rename, remove
from os.path import join, getsize
from shutil import move
from zipfile import ZipFile, is_zipfile, BadZipfile
+
from httplib import responses
from requests import get, codes, RequestException, Timeout, TooManyRedirects, \
HTTPError, ConnectionError
@@ -51,7 +52,13 @@ def download_data_files(spec):
for job, builds in spec.builds.items():
for build in builds:
if job.startswith("csit-"):
- url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
+ if spec.input["file-name"].endswith(".zip"):
+ url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
+ elif spec.input["file-name"].endswith(".gz"):
+ url = spec.environment["urls"]["URL[NEXUS,LOG]"]
+ else:
+ logging.error("Not supported file format.")
+ continue
elif job.startswith("hc2vpp-"):
url = spec.environment["urls"]["URL[JENKINS,HC]"]
else:
@@ -106,25 +113,37 @@ def download_data_files(spec):
file_handle.write(chunk)
file_handle.close()
- expected_length = None
- try:
- expected_length = int(response.headers["Content-Length"])
- logging.debug(" Expected file size: {0}B".
- format(expected_length))
- except KeyError:
- logging.debug(" No information about expected size.")
-
- real_length = getsize(new_name)
- logging.debug(" Downloaded size: {0}B".format(real_length))
-
- if expected_length:
- if real_length == expected_length:
+ if spec.input["file-name"].endswith(".zip"):
+ expected_length = None
+ try:
+ expected_length = int(response.
+ headers["Content-Length"])
+ logging.debug(" Expected file size: {0}B".
+ format(expected_length))
+ except KeyError:
+ logging.debug(" No information about expected size.")
+
+ real_length = getsize(new_name)
+ logging.debug(" Downloaded size: {0}B".format(real_length))
+
+ if expected_length:
+ if real_length == expected_length:
+ status = "downloaded"
+ logging.info("{0}: {1}".format(code,
+ responses[code]))
+ else:
+ logging.error("The file size differs from the "
+ "expected size.")
+ else:
status = "downloaded"
logging.info("{0}: {1}".format(code, responses[code]))
- else:
- logging.error("The file size differs from the expected "
- "size.")
- else:
+
+ elif spec.input["file-name"].endswith(".gz"):
+ rename(new_name, new_name[:-7])
+ with open(new_name[:-7], 'r') as xml_file:
+ with gzip.open(new_name, 'wb') as gz_file:
+ gz_file.write(xml_file.read())
+ new_name = new_name[:-7]
status = "downloaded"
logging.info("{0}: {1}".format(code, responses[code]))
@@ -185,29 +204,30 @@ def unzip_files(spec):
directory = spec.environment["paths"]["DIR[WORKING,DATA]"]
file_name = join(build["file-name"])
- if build["status"] == "downloaded" and is_zipfile(file_name):
+ if build["status"] == "downloaded":
logging.info("Unziping: '{0}' from '{1}'.".
format(data_file, file_name))
new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2],
SEPARATOR,
data_file.split("/")[-1])
try:
- with ZipFile(file_name, 'r') as zip_file:
- zip_file.extract(data_file, directory)
- logging.info("Moving {0} to {1} ...".
- format(join(directory, data_file),
- directory))
- move(join(directory, data_file), directory)
- logging.info("Renaming the file '{0}' to '{1}'".
- format(join(directory,
- data_file.split("/")[-1]),
- new_name))
- rename(join(directory, data_file.split("/")[-1]),
- new_name)
+ if is_zipfile(file_name):
+ with ZipFile(file_name, 'r') as zip_file:
+ zip_file.extract(data_file, directory)
+ logging.info("Moving {0} to {1} ...".
+ format(join(directory, data_file),
+ directory))
+ move(join(directory, data_file), directory)
+ logging.info("Renaming the file '{0}' to '{1}'".
+ format(join(directory,
+ data_file.split("/")[-1]),
+ new_name))
+ rename(join(directory, data_file.split("/")[-1]),
+ new_name)
+ spec.set_input_file_name(job, build["build"],
+ new_name)
status = "unzipped"
spec.set_input_state(job, build["build"], status)
- spec.set_input_file_name(job, build["build"],
- new_name)
except (BadZipfile, RuntimeError) as err:
logging.error("Failed to unzip the file '{0}': {1}.".
format(file_name, str(err)))
@@ -216,8 +236,7 @@ def unzip_files(spec):
format(data_file, str(err)))
finally:
if status == "failed":
- spec.set_input_file_name(job, build["build"],
- None)
+ spec.set_input_file_name(job, build["build"], None)
else:
raise PresentationError("The file '{0}' does not exist or "
"it is not a zip file".
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 203a3bfd25..87d822f880 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -23,6 +23,7 @@ import pandas as pd
import logging
from robot.api import ExecutionResult, ResultVisitor
+from robot import errors
from collections import OrderedDict
from string import replace
@@ -173,6 +174,9 @@ class ExecutionChecker(ResultVisitor):
REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
+ REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
+ r'tx\s(\d*),\srx\s(\d*)')
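+    # Example of a message matched by REGEX_MRR (illustrative):
+    # "MaxReceivedRate_Results [pkts/10sec]: tx 123456, rx 123450"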
+
def __init__(self, **metadata):
"""Initialisation.
@@ -219,7 +223,7 @@ class ExecutionChecker(ResultVisitor):
self.parse_msg = {
"setup-version": self._get_version,
"teardown-vat-history": self._get_vat_history,
- "teardown-show-runtime": self._get_show_run
+ "test-show-runtime": self._get_show_run
}
@property
@@ -372,11 +376,11 @@ class ExecutionChecker(ResultVisitor):
self._data["suites"][suite.longname.lower().replace('"', "'").
replace(" ", "_")] = {
- "name": suite.name.lower(),
- "doc": doc_str,
- "parent": parent_name,
- "level": len(suite.longname.split("."))
- }
+ "name": suite.name.lower(),
+ "doc": doc_str,
+ "parent": parent_name,
+ "level": len(suite.longname.split("."))
+ }
suite.keywords.visit(self)
@@ -415,17 +419,20 @@ class ExecutionChecker(ResultVisitor):
test_result["tags"] = tags
doc_str = test.doc.replace('"', "'").replace('\n', ' '). \
replace('\r', '').replace('[', ' |br| [')
- test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
+ test_result["doc"] = replace(doc_str, ' |br| [', '[', maxreplace=1)
test_result["msg"] = test.message.replace('\n', ' |br| '). \
replace('\r', '').replace('"', "'")
- if test.status == "PASS" and ("NDRPDRDISC" in tags or "TCP" in tags):
-
+ if test.status == "PASS" and ("NDRPDRDISC" in tags or
+ "TCP" in tags or
+ "MRR" in tags):
if "NDRDISC" in tags:
test_type = "NDR"
elif "PDRDISC" in tags:
test_type = "PDR"
- elif "TCP" in tags: # Change to wrk?
+ elif "TCP" in tags:
test_type = "TCP"
+ elif "MRR" in tags:
+ test_type = "MRR"
else:
return
@@ -458,6 +465,15 @@ class ExecutionChecker(ResultVisitor):
test_result["result"] = dict()
test_result["result"]["value"] = int(groups.group(2))
test_result["result"]["unit"] = groups.group(1)
+ elif test_type in ("MRR", ):
+ groups = re.search(self.REGEX_MRR, test.message)
+ test_result["result"] = dict()
+ test_result["result"]["duration"] = int(groups.group(1))
+ test_result["result"]["tx"] = int(groups.group(2))
+ test_result["result"]["rx"] = int(groups.group(3))
+ test_result["result"]["throughput"] = int(
+ test_result["result"]["rx"] /
+ test_result["result"]["duration"])
else:
test_result["status"] = test.status
@@ -496,6 +512,9 @@ class ExecutionChecker(ResultVisitor):
elif keyword.type == "teardown":
self._lookup_kw_nr = 0
self.visit_teardown_kw(keyword)
+ else:
+ self._lookup_kw_nr = 0
+ self.visit_test_kw(keyword)
except AttributeError:
pass
@@ -508,6 +527,42 @@ class ExecutionChecker(ResultVisitor):
"""
pass
+ def visit_test_kw(self, test_kw):
+ """Implements traversing through the test keyword and its child
+ keywords.
+
+ :param test_kw: Keyword to process.
+ :type test_kw: Keyword
+ :returns: Nothing.
+ """
+ for keyword in test_kw.keywords:
+ if self.start_test_kw(keyword) is not False:
+ self.visit_test_kw(keyword)
+ self.end_test_kw(keyword)
+
+ def start_test_kw(self, test_kw):
+ """Called when test keyword starts. Default implementation does
+ nothing.
+
+ :param test_kw: Keyword to process.
+ :type test_kw: Keyword
+ :returns: Nothing.
+ """
+ if test_kw.name.count("Show Runtime Counters On All Duts"):
+ self._lookup_kw_nr += 1
+ self._show_run_lookup_nr = 0
+ self._msg_type = "test-show-runtime"
+ test_kw.messages.visit(self)
+
+ def end_test_kw(self, test_kw):
+ """Called when keyword ends. Default implementation does nothing.
+
+ :param test_kw: Keyword to process.
+ :type test_kw: Keyword
+ :returns: Nothing.
+ """
+ pass
+
def visit_setup_kw(self, setup_kw):
"""Implements traversing through the teardown keyword and its child
keywords.
@@ -568,12 +623,6 @@ class ExecutionChecker(ResultVisitor):
if teardown_kw.name.count("Show Vat History On All Duts"):
self._vat_history_lookup_nr = 0
self._msg_type = "teardown-vat-history"
- elif teardown_kw.name.count("Show Statistics On All Duts"):
- self._lookup_kw_nr += 1
- self._show_run_lookup_nr = 0
- self._msg_type = "teardown-show-runtime"
-
- if self._msg_type:
teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
@@ -710,7 +759,12 @@ class InputData(object):
"""
with open(build["file-name"], 'r') as data_file:
- result = ExecutionResult(data_file)
+ try:
+ result = ExecutionResult(data_file)
+ except errors.DataError as err:
+ logging.error("Error occurred while parsing output.xml: {0}".
+ format(err))
+ return None
checker = ExecutionChecker(job=job, build=build)
result.visit(checker)
@@ -736,6 +790,11 @@ class InputData(object):
logging.info(" Processing the file '{0}'".
format(build["file-name"]))
data = InputData._parse_tests(job, build)
+ if data is None:
+ logging.error("Input data file from the job '{job}', build "
+ "'{build}' is damaged. Skipped.".
+ format(job=job, build=build["build"]))
+ continue
build_data = pd.Series({
"metadata": pd.Series(data["metadata"].values(),
@@ -793,7 +852,8 @@ class InputData(object):
index += 1
tag_filter = tag_filter[:index] + " in tags" + tag_filter[index:]
- def filter_data(self, element, params=None, data_set="tests"):
+ def filter_data(self, element, params=None, data_set="tests",
+ continue_on_error=False):
"""Filter required data from the given jobs and builds.
The output data structure is:
@@ -818,15 +878,18 @@ class InputData(object):
all parameters are included.
:param data_set: The set of data to be filtered: tests, suites,
metadata.
+    :param continue_on_error: Continue if an error occurs while reading the
+        data. The item will be empty then.
:type element: pandas.Series
:type params: list
:type data_set: str
+ :type continue_on_error: bool
:returns: Filtered data.
:rtype pandas.Series
"""
logging.info(" Creating the data set for the {0} '{1}'.".
- format(element["type"], element.get("title", "")))
+ format(element.get("type", ""), element.get("title", "")))
try:
if element["filter"] in ("all", "template"):
@@ -847,8 +910,15 @@ class InputData(object):
data[job] = pd.Series()
for build in builds:
data[job][str(build)] = pd.Series()
- for test_ID, test_data in \
- self.data[job][str(build)][data_set].iteritems():
+ try:
+ data_iter = self.data[job][str(build)][data_set].\
+ iteritems()
+ except KeyError:
+ if continue_on_error:
+ continue
+ else:
+ return None
+ for test_ID, test_data in data_iter:
if eval(cond, {"tags": test_data.get("tags", "")}):
data[job][str(build)][test_ID] = pd.Series()
if params is None:
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 6d613e339c..85b7bbc370 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -28,6 +28,9 @@ from generator_plots import generate_plots
from generator_files import generate_files
from static_content import prepare_static_content
from generator_report import generate_report
+from generator_CPTA import generate_cpta
+
+from pprint import pprint
def parse_args():
@@ -81,8 +84,9 @@ def main():
spec.read_specification()
except PresentationError:
logging.critical("Finished with error.")
- sys.exit(1)
+ return 1
+ ret_code = 1
try:
env = Environment(spec.environment, args.force)
env.set_environment()
@@ -101,22 +105,32 @@ def main():
generate_tables(spec, data)
generate_plots(spec, data)
generate_files(spec, data)
- generate_report(args.release, spec)
- logging.info("Successfully finished.")
+ if spec.output["output"] == "report":
+ generate_report(args.release, spec)
+ logging.info("Successfully finished.")
+ ret_code = 0
+ elif spec.output["output"] == "CPTA":
+ ret_code = generate_cpta(spec, data)
+ logging.info("Successfully finished.")
+ else:
+ logging.critical("The output '{0}' is not supported.".
+ format(spec.output["output"]))
+ ret_code = 1
except (KeyError, ValueError, PresentationError) as err:
logging.info("Finished with an error.")
logging.critical(str(err))
+ ret_code = 1
except Exception as err:
logging.info("Finished with an unexpected error.")
logging.critical(str(err))
-
+ ret_code = 1
finally:
if spec is not None and not spec.is_debug:
clean_environment(spec.environment)
- sys.exit(1)
+ return ret_code
if __name__ == '__main__':
- main()
+ sys.exit(main())
diff --git a/resources/tools/presentation/run_cpta.sh b/resources/tools/presentation/run_cpta.sh
new file mode 100755
index 0000000000..233e8dfbe2
--- /dev/null
+++ b/resources/tools/presentation/run_cpta.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set +x
+
+# set default values in config array
+typeset -A DIR
+
+DIR[WORKING]=_tmp
+
+# Install system dependencies
+sudo apt-get -y update
+sudo apt-get -y install libxml2 libxml2-dev libxslt-dev build-essential \
+ zlib1g-dev unzip
+
+# Clean-up when finished
+trap 'rm -rf ${DIR[WORKING]}; exit' EXIT
+trap 'rm -rf ${DIR[WORKING]}; exit' ERR
+
+# Create working directories
+mkdir ${DIR[WORKING]}
+
+# Create virtual environment
+virtualenv ${DIR[WORKING]}/env
+. ${DIR[WORKING]}/env/bin/activate
+
+# Install python dependencies:
+pip install -r requirements.txt
+
+export PYTHONPATH=`pwd`
+
+python pal.py \
+ --specification specification_CPTA.yaml \
+ --logging INFO \
+ --force
+
+RETURN_STATUS=$?
+exit ${RETURN_STATUS}
diff --git a/resources/tools/presentation/run_report.sh b/resources/tools/presentation/run_report.sh
index 34d6c5d7be..3c3a9f75ce 100755
--- a/resources/tools/presentation/run_report.sh
+++ b/resources/tools/presentation/run_report.sh
@@ -44,3 +44,6 @@ python pal.py \
--release ${RELEASE} \
--logging INFO \
--force
+
+RETURN_STATUS=$?
+exit ${RETURN_STATUS}
diff --git a/resources/tools/presentation/specification_CPTA.yaml b/resources/tools/presentation/specification_CPTA.yaml
new file mode 100644
index 0000000000..51cdc6324b
--- /dev/null
+++ b/resources/tools/presentation/specification_CPTA.yaml
@@ -0,0 +1,400 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is the specification of parameters for "Continuous Performance Trending
+# and Analysis" feature provided by PAL.
+
+-
+ type: "environment"
+ configuration:
+ # Debug mode:
+ # - Skip:
+ # - Download of input data files
+ # - Do:
+ # - Read data from given zip / xml files
+ # - Set the configuration as it is done in normal mode
+ # If the section "type: debug" is missing, CFG[DEBUG] is set to 0.
+ CFG[DEBUG]: 0
+
+ paths:
+ # Top level directories:
+ ## Working directory
+ DIR[WORKING]: "_tmp"
+ ## Build directories
+ DIR[BUILD,HTML]: "_build"
+ ## Static .rst files
+ DIR[RST]: "../../../docs/cpta"
+
+ # Static html content
+ DIR[STATIC]: "{DIR[BUILD,HTML]}/_static"
+ DIR[STATIC,VPP]: "{DIR[STATIC]}/vpp"
+ # DIR[STATIC,DPDK]: "{DIR[STATIC]}/dpdk"
+ DIR[STATIC,ARCH]: "{DIR[STATIC]}/archive"
+
+ # Working directories
+ ## Input data files (.zip, .xml)
+ DIR[WORKING,DATA]: "{DIR[WORKING]}/data"
+ ## Static source files from git
+ DIR[WORKING,SRC]: "{DIR[WORKING]}/src"
+ DIR[WORKING,SRC,STATIC]: "{DIR[WORKING,SRC]}/_static"
+
+ # .css patch file
+ DIR[CSS_PATCH_FILE]: "{DIR[STATIC]}/theme_overrides.css"
+ DIR[CSS_PATCH_FILE2]: "{DIR[WORKING,SRC,STATIC]}/theme_overrides.css"
+
+ urls:
+ URL[JENKINS,CSIT]: "https://jenkins.fd.io/view/csit/job"
+ URL[NEXUS,LOG]: "https://logs.fd.io/production/vex-yul-rot-jenkins-1"
+ URL[NEXUS]: "https://docs.fd.io/csit"
+ DIR[NEXUS]: "report/_static/archive"
+
+ make-dirs:
+ # List the directories which are created while preparing the environment.
+ # All directories MUST be defined in "paths" section.
+ - "DIR[WORKING,DATA]"
+ - "DIR[WORKING,SRC,STATIC]"
+ - "DIR[BUILD,HTML]"
+ - "DIR[STATIC,VPP]"
+ - "DIR[STATIC,ARCH]"
+ build-dirs:
+ # List the directories where the results (build) is stored.
+ # All directories MUST be defined in "paths" section.
+ - "DIR[BUILD,HTML]"
+
+-
+ type: "configuration"
+
+ data-sets:
+# TODO: Specify input data, this is only an example:
+ plot-performance-trending:
+ csit-vpp-perf-mrr-daily-master:
+ start: 9
+ end: 14 # "lastSuccessfulBuild" # take all from the 'start'
+
+ plot-layouts:
+ plot-cpta:
+ title: ""
+ autosize: False
+ showlegend: True
+ width: 1100
+ height: 800
+ yaxis:
+ showticklabels: True
+ title: "Throughput [Mpps]"
+ hoverformat: ".4s"
+ range: []
+ gridcolor: "rgb(238, 238, 238)"
+ linecolor: "rgb(238, 238, 238)"
+ showline: True
+ zeroline: False
+ tickcolor: "rgb(238, 238, 238)"
+ linewidth: 1
+ showgrid: True
+ xaxis:
+ showticklabels: True
+ title: "VPP Performance Trending Job ID"
+ autorange: True
+ showgrid: True
+ gridcolor: "rgb(238, 238, 238)"
+ linecolor: "rgb(238, 238, 238)"
+ fixedrange: False
+ zeroline: False
+ tickcolor: "rgb(238, 238, 238)"
+ showline: True
+ linewidth: 1
+ autotick: True
+ margin:
+ r: 20
+ b: 50
+ t: 50
+ l: 70
+ legend:
+ orientation: "h"
+ traceorder: "normal"
+# tracegroupgap: 10
+# bordercolor: "rgb(238, 238, 238)"
+# borderwidth: 1
+
+-
+ type: "debug"
+ general:
+ input-format: "xml" # zip or xml
+ extract: "robot-plugin/output.xml" # Only for zip
+ builds:
+ # The files must be in the directory DIR[WORKING,DATA]
+ csit-vpp-perf-mrr-daily-master:
+ -
+ build: 1
+ file: "{DIR[WORKING,DATA]}/output_mrr_1.xml"
+ -
+ build: 2
+ file: "{DIR[WORKING,DATA]}/output_mrr_2.xml"
+ -
+ build: 3
+ file: "{DIR[WORKING,DATA]}/output_mrr_3.xml"
+ -
+ build: 4
+ file: "{DIR[WORKING,DATA]}/output_mrr_4.xml"
+ -
+ build: 5
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 6
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 7
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 8
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 9
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 10
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 11
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+ -
+ build: 12
+ file: "{DIR[WORKING,DATA]}/output_mrr_5.xml"
+
+-
+ type: "static"
+ src-path: "{DIR[RST]}"
+ dst-path: "{DIR[WORKING,SRC]}"
+
+-
+ type: "input" # Ignored in debug mode
+ general:
+ file-name: "output.xml.log.gz"
+ file-format: ".gz"
+ download-path: "{job}/{build}/archives/{filename}"
+ extract: "output.xml"
+# file-name: "robot-plugin.zip"
+# file-format: ".zip"
+# download-path: "{job}/{build}/robot/report/*zip*/{filename}"
+# extract: "robot-plugin/output.xml"
+ builds:
+ csit-vpp-perf-mrr-daily-master:
+ start: 9
+ end: 14 #"lastSuccessfulBuild" # take all from the 'start'
+# csit-vpp-perf-check-master:
+# start: 22
+# end: 22
+
+-
+ type: "output"
+ output:
+# "report"
+ "CPTA" # Continuous Performance Trending and Analysis
+ format:
+ html:
+ - full
+ pdf:
+ - minimal
+
+################################################################################
+### C P T A ###
+################################################################################
+
+# Plots VPP Continuous Performance Trending and Analysis
+-
+ type: "cpta"
+ title: "Continuous Performance Trending and Analysis"
+ algorithm: "cpta"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/cpta"
+ plots:
+
+# L2
+
+ - title: "VPP 1T1C L2 64B Packet Throughput - {period} Trending"
+ output-file-name: "l2"
+ data: "plot-performance-trending"
+ filter: "'MRR' and '64B' and ('BASE' or 'SCALE') and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST' and not 'MEMIF'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+# IPv4
+
+ - title: "VPP 1T1C IPv4 64B Packet Throughput - {period} Trending"
+ output-file-name: "ip4"
+ data: "plot-performance-trending"
+ filter: "'MRR' and '64B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP4FWD' and not 'IPSEC' and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+# IPv6
+
+ - title: "VPP 1T1C IPv6 78B Packet Throughput - {period} Trending"
+ output-file-name: "ip6"
+ data: "plot-performance-trending"
+ filter: "'MRR' and '78B' and ('BASE' or 'SCALE' or 'FEATURE') and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+# Container memif
+
+ - title: "VPP 1T1C L2 Container memif 64B Packet Throughput - {period} Trending"
+ output-file-name: "container-memif-l2-1t1c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 2T2C L2 Container memif 64B Packet Throughput - {period} Trending"
+ output-file-name: "container-memif-l2-2t2c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and 'MRR' and '64B' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 1T1C L2 Container memif 64B Packet Throughput - {period} Trending"
+ output-file-name: "container-memif-l2-1t1c-xl710"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and 'BASE' and '1T1C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 2T2C L2 Container memif 64B Packet Throughput - {period} Trending"
+ output-file-name: "container-memif-l2-2t2c-xl710"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-XL710' and 'MRR' and '64B' and 'BASE' and '2T2C' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+# VM vhost
+
+ - title: "VPP 1T1C VM vhost ethip4 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-ethip4-1t1c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 2T2C VM vhost ethip4 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-ethip4-2t2c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 1T1C VM vhost eth 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-eth-1t1c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 2T2C VM vhost eth 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-eth-2t2c-x520"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 1T1C VM vhost eth 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-eth-1t1c-xl710"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-XL710' and '64B' and 'MRR' and '1T1C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
+
+ - title: "VPP 2T2C VM vhost eth 64B Packet Throughput - {period} Trending"
+ output-file-name: "vm-vhost-eth-2t2c-xl710"
+ data: "plot-performance-trending"
+ filter: "'NIC_Intel-XL710' and '64B' and 'MRR' and '2T2C' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "result"
+ - "name"
+ periods:
+ - 1
+ - 5
+ - 30
+ layout: "plot-cpta"
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 501f9f191a..2659c29ca5 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -22,6 +22,7 @@ from yaml import load, YAMLError
from pprint import pformat
from errors import PresentationError
+from utils import get_last_build_number
class Specification(object):
@@ -53,7 +54,8 @@ class Specification(object):
"output": dict(),
"tables": list(),
"plots": list(),
- "files": list()}
+ "files": list(),
+ "cpta": dict()}
@property
def specification(self):
@@ -173,6 +175,17 @@ class Specification(object):
"""
return self._specification["files"]
+ @property
+ def cpta(self):
+ """Getter - Continuous Performance Trending and Analysis to be
+ generated.
+
+ :returns: List of specifications of Continuous Performance Trending and
+ Analysis to be generated.
+ :rtype: list
+ """
+ return self._specification["cpta"]
+
def set_input_state(self, job, build_nr, state):
"""Set the state of input
@@ -354,9 +367,31 @@ class Specification(object):
try:
self._specification["configuration"] = self._cfg_yaml[idx]
+
except KeyError:
raise PresentationError("No configuration defined.")
+ # Data sets: Replace ranges by lists
+ for set_name, data_set in self.configuration["data-sets"].items():
+ for job, builds in data_set.items():
+ if builds:
+ if isinstance(builds, dict):
+ # defined as a range <start, end>
+ if builds.get("end", None) == "lastSuccessfulBuild":
+ # defined as a range <start, lastSuccessfulBuild>
+ ret_code, build_nr, _ = get_last_build_number(
+ self.environment["urls"]["URL[JENKINS,CSIT]"],
+ job)
+ if ret_code != 0:
+ raise PresentationError(
+ "Not possible to get the number of the "
+ "last successful build.")
+ else:
+                        # defined as a range <start, end (build number)>
+                        build_nr = builds.get("end", None)
+                    builds = [x for x in range(builds["start"],
+                                               int(build_nr) + 1)]
+ self.configuration["data-sets"][set_name][job] = builds
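+                    # Example (illustrative): a data set defined as
+                    # {start: 9, end: 14} expands to [9, 10, 11, 12, 13, 14].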
+
logging.info("Done.")
def _parse_debug(self):
@@ -412,8 +447,25 @@ class Specification(object):
for key, value in self._cfg_yaml[idx]["general"].items():
self._specification["input"][key] = value
self._specification["input"]["builds"] = dict()
+
for job, builds in self._cfg_yaml[idx]["builds"].items():
if builds:
+ if isinstance(builds, dict):
+ # defined as a range <start, end>
+ if builds.get("end", None) == "lastSuccessfulBuild":
+ # defined as a range <start, lastSuccessfulBuild>
+ ret_code, build_nr, _ = get_last_build_number(
+ self.environment["urls"]["URL[JENKINS,CSIT]"],
+ job)
+ if ret_code != 0:
+ raise PresentationError(
+ "Not possible to get the number of the "
+ "last successful build.")
+ else:
+ # defined as a range <start, end (build number)>
+ build_nr = builds.get("end", None)
+ builds = [x for x in range(builds["start"],
+ int(build_nr) + 1)]
self._specification["input"]["builds"][job] = list()
for build in builds:
self._specification["input"]["builds"][job].\
@@ -440,8 +492,8 @@ class Specification(object):
raise PresentationError("No output defined.")
try:
- self._specification["output"] = self._cfg_yaml[idx]["format"]
- except KeyError:
+ self._specification["output"] = self._cfg_yaml[idx]
+ except (KeyError, IndexError):
raise PresentationError("No output defined.")
logging.info("Done.")
@@ -535,6 +587,35 @@ class Specification(object):
self._specification["files"].append(element)
count += 1
+ elif element["type"] == "cpta":
+ logging.info(" {:3d} Processing Continuous Performance "
+ "Trending and Analysis ...".format(count))
+
+ for plot in element["plots"]:
+ # Add layout to the plots:
+ layout = plot.get("layout", None)
+ if layout is not None:
+ try:
+ plot["layout"] = \
+ self.configuration["plot-layouts"][layout]
+ except KeyError:
+ raise PresentationError(
+ "Layout {0} is not defined in the "
+ "configuration section.".format(layout))
+ # Add data sets:
+ if isinstance(plot.get("data", None), str):
+ data_set = plot["data"]
+ try:
+ plot["data"] = \
+ self.configuration["data-sets"][data_set]
+ except KeyError:
+ raise PresentationError(
+ "Data set {0} is not defined in "
+ "the configuration section.".
+ format(data_set))
+ self._specification["cpta"] = element
+ count += 1
+
logging.info("Done.")
def read_specification(self):
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 7037404c27..966d7f558b 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -14,12 +14,18 @@
"""General purpose utilities.
"""
+import subprocess
import numpy as np
+import pandas as pd
+import logging
-from os import walk
-from os.path import join
+from os import walk, makedirs, environ
+from os.path import join, isdir
+from shutil import copy, Error
from math import sqrt
+from errors import PresentationError
+
def mean(items):
"""Calculate mean value from the items.
@@ -62,27 +68,37 @@ def relative_change(nr1, nr2):
return float(((nr2 - nr1) / nr1) * 100)
-def remove_outliers(input_data, outlier_const):
- """
+def find_outliers(input_data, outlier_const=1.5):
+ """Go through the input data and generate two pandas series:
+ - input data without outliers
+ - outliers.
+ The function uses IQR to detect outliers.
- :param input_data: Data from which the outliers will be removed.
+ :param input_data: Data to be examined for outliers.
:param outlier_const: Outlier constant.
- :type input_data: list
+ :type input_data: pandas.Series
:type outlier_const: float
- :returns: The input list without outliers.
- :rtype: list
+ :returns: Tuple: input data with outliers removed; Outliers.
+ :rtype: tuple (trimmed_data, outliers)
"""
- data = np.array(input_data)
- upper_quartile = np.percentile(data, 75)
- lower_quartile = np.percentile(data, 25)
+ upper_quartile = input_data.quantile(q=0.75)
+ lower_quartile = input_data.quantile(q=0.25)
iqr = (upper_quartile - lower_quartile) * outlier_const
- quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
- result_lst = list()
- for y in data.tolist():
- if quartile_set[0] <= y <= quartile_set[1]:
- result_lst.append(y)
- return result_lst
+ low = lower_quartile - iqr
+ high = upper_quartile + iqr
+ trimmed_data = pd.Series()
+ outliers = pd.Series()
+ for item in input_data.items():
+ item_pd = pd.Series([item[1], ], index=[item[0], ])
+ if low <= item[1] <= high:
+ trimmed_data = trimmed_data.append(item_pd)
+ else:
+ trimmed_data = trimmed_data.append(pd.Series([np.nan, ],
+ index=[item[0], ]))
+ outliers = outliers.append(item_pd)
+
+ return trimmed_data, outliers
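+
+# Example (illustrative): for pd.Series([10.0, 11.0, 12.0, 13.0, 100.0]) the
+# quartiles are 11.0 and 13.0, so with outlier_const=1.5 the accepted band is
+# <8.0, 16.0>; 100.0 ends up in the outliers series and is replaced by NaN in
+# the trimmed data.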
def get_files(path, extension=None, full_path=True):
@@ -127,3 +143,78 @@ def get_rst_title_char(level):
return chars[level]
else:
return chars[-1]
+
+
+def execute_command(cmd):
+ """Execute the command in a subprocess and log the stdout and stderr.
+
+ :param cmd: Command to execute.
+ :type cmd: str
+    :returns: Return code, stdout and stderr of the executed command.
+    :rtype: tuple(int, str, str)
+ """
+
+ env = environ.copy()
+ proc = subprocess.Popen(
+ [cmd],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True,
+ env=env)
+
+ stdout, stderr = proc.communicate()
+
+ logging.info(stdout)
+ logging.info(stderr)
+
+ if proc.returncode != 0:
+ logging.error(" Command execution failed.")
+ return proc.returncode, stdout, stderr
+
+
+def get_last_build_number(jenkins_url, job_name):
+ """
+
+ :param jenkins_url:
+ :param job_name:
+ :return:
+ """
+
+ url = "{}/{}/lastSuccessfulBuild/buildNumber".format(jenkins_url, job_name)
+ cmd = "wget -qO- {url}".format(url=url)
+
+ return execute_command(cmd)
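+
+# Example (illustrative), mirroring the call in specification_parser.py:
+#   ret_code, build_nr, _ = get_last_build_number(
+#       "https://jenkins.fd.io/view/csit/job", "csit-vpp-perf-mrr-daily-master")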
+
+
+def archive_input_data(spec):
+ """Archive the report.
+
+ :param spec: Specification read from the specification file.
+ :type spec: Specification
+ :raises PresentationError: If it is not possible to archive the input data.
+ """
+
+ logging.info(" Archiving the input data files ...")
+
+ if spec.is_debug:
+ extension = spec.debug["input-format"]
+ else:
+ extension = spec.input["file-format"]
+ data_files = get_files(spec.environment["paths"]["DIR[WORKING,DATA]"],
+ extension=extension)
+ dst = spec.environment["paths"]["DIR[STATIC,ARCH]"]
+ logging.info(" Destination: {0}".format(dst))
+
+ try:
+ if not isdir(dst):
+ makedirs(dst)
+
+ for data_file in data_files:
+ logging.info(" Copying the file: {0} ...".format(data_file))
+ copy(data_file, dst)
+
+ except (Error, OSError) as err:
+ raise PresentationError("Not possible to archive the input data.",
+ str(err))
+
+ logging.info(" Done.")