author     Tibor Frank <tifrank@cisco.com>    2018-05-29 10:45:47 +0200
committer  Tibor Frank <tifrank@cisco.com>    2018-05-30 11:06:08 +0000
commit     e01470ec9038338409a494a2652eecabf4394578 (patch)
tree       9a2e19441e2456652901b0e6438fdf909668f6fa /resources/tools/presentation
parent     564c2ae4f2d3cc7a210f6fe17f55091afcc05d45 (diff)
CSIT-1105: Prepare and generate 18.01.2 report
Change-Id: Iebda4fd10701c27512b443c14b2aeef314003d58
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools/presentation')
-rw-r--r--  resources/tools/presentation/environment.py          |   39
-rw-r--r--  resources/tools/presentation/generator_CPTA.py        |  498
-rw-r--r--  resources/tools/presentation/generator_files.py       |    4
-rw-r--r--  resources/tools/presentation/generator_plots.py       |    8
-rw-r--r--  resources/tools/presentation/generator_report.py      |    4
-rw-r--r--  resources/tools/presentation/generator_tables.py      |  526
-rw-r--r--  resources/tools/presentation/input_data_files.py      |  394
-rw-r--r--  resources/tools/presentation/input_data_parser.py     |  205
-rw-r--r--  resources/tools/presentation/pal.py                   |   27
-rw-r--r--  resources/tools/presentation/specification.yaml       | 1880
-rw-r--r--  resources/tools/presentation/specification_parser.py  |   60
-rw-r--r--  resources/tools/presentation/utils.py                 |  161
12 files changed, 2051 insertions, 1755 deletions
diff --git a/resources/tools/presentation/environment.py b/resources/tools/presentation/environment.py
index 05376e0e09..a2fa9a0d5b 100644
--- a/resources/tools/presentation/environment.py
+++ b/resources/tools/presentation/environment.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -51,35 +51,6 @@ class Environment(object):
"""
return self._env
- def _set_environment_variables(self):
- """Set environment variables.
- """
- logging.info("Setting the environment variables ...")
- # logging.debug("Environment variables before:\n{}".format(os.environ))
-
- count = 1
-
- for var, value in self._env["configuration"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- for var, value in self._env["paths"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- for var, value in self._env["urls"].items():
- logging.debug(" {:3d} Setting the variable {} = {}".
- format(count, var, value))
- os.environ[var] = str(value)
- count += 1
-
- # logging.debug("Environment variables after:\n{}".format(os.environ))
- logging.info("Done.")
-
def _make_dirs(self):
"""Create the directories specified in the 'make-dirs' part of
'environment' section in the specification file.
@@ -122,7 +93,6 @@ class Environment(object):
"""Set the environment.
"""
- self._set_environment_variables()
self._make_dirs()
@@ -147,9 +117,10 @@ def clean_environment(env):
if os.path.isdir(dir_to_remove):
try:
shutil.rmtree(dir_to_remove)
- except OSError:
- raise PresentationError("Cannot remove the directory '{}'".
- format(dir_to_remove))
+ except OSError as err:
+ logging.warning("Cannot remove the directory '{}'".
+ format(dir_to_remove))
+ logging.debug(str(err))
else:
logging.warning("The directory '{}' does not exist.".
format(dir_to_remove))
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 9195787b46..2c62e11a97 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -14,25 +14,28 @@
"""Generation of Continuous Performance Trending and Analysis.
"""
-import datetime
+import multiprocessing
+import os
import logging
import csv
import prettytable
import plotly.offline as ploff
import plotly.graph_objs as plgo
import plotly.exceptions as plerr
-import numpy as np
import pandas as pd
from collections import OrderedDict
-from utils import find_outliers, archive_input_data, execute_command
+from datetime import datetime
+
+from utils import split_outliers, archive_input_data, execute_command,\
+ classify_anomalies, Worker
# Command to build the html format of the report
HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
'-b html -E ' \
'-t html ' \
- '-D version="Generated on {date}" ' \
+ '-D version="{date}" ' \
'{working_dir} ' \
'{build_dir}/'
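The new imports pull split_outliers, classify_anomalies and Worker from utils.py; none of them are part of this diff. As an illustration only, here is a minimal sketch of the per-sample classification idea, assuming the median +/- 3 * stdev thresholds documented in the removed _evaluate_results() further below; the real utils.classify_anomalies() may differ (for instance it also emits an "outlier" label, as the anomaly_color mapping later in this file suggests):

# Illustrative sketch only; the real classify_anomalies() lives in utils.py.
import pandas as pd

def classify_anomalies_sketch(series, window=10):
    """Label each sample as "normal", "regression" or "progression".

    :param series: Trimmed data (outliers already removed).
    :param window: Size of the moving window.
    :type series: pandas.Series
    :type window: int
    :returns: One label per sample.
    :rtype: list
    """
    median = series.rolling(window=window, min_periods=2).median()
    stdev = series.rolling(window=window, min_periods=2).std()
    labels = list()
    for idx, value in enumerate(series.values):
        if idx == 0 or pd.isnull(median.values[idx - 1]) \
                or pd.isnull(stdev.values[idx - 1]):
            labels.append("normal")
        elif value < median.values[idx - 1] - 3 * stdev.values[idx - 1]:
            labels.append("regression")
        elif value > median.values[idx - 1] + 3 * stdev.values[idx - 1]:
            labels.append("progression")
        else:
            labels.append("normal")
    return labels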
@@ -64,7 +67,7 @@ def generate_cpta(spec, data):
ret_code = _generate_all_charts(spec, data)
cmd = HTML_BUILDER.format(
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
execute_command(cmd)
@@ -84,196 +87,84 @@ def generate_cpta(spec, data):
return ret_code
-def _select_data(in_data, period, fill_missing=False, use_first=False):
- """Select the data from the full data set. The selection is done by picking
- the samples depending on the period: period = 1: All, period = 2: every
- second sample, period = 3: every third sample ...
-
- :param in_data: Full set of data.
- :param period: Sampling period.
- :param fill_missing: If the chosen sample is missing in the full set, its
- nearest neighbour is used.
- :param use_first: Use the first sample even though it is not chosen.
- :type in_data: OrderedDict
- :type period: int
- :type fill_missing: bool
- :type use_first: bool
- :returns: Reduced data.
- :rtype: OrderedDict
- """
-
- first_idx = min(in_data.keys())
- last_idx = max(in_data.keys())
-
- idx = last_idx
- data_dict = dict()
- if use_first:
- data_dict[first_idx] = in_data[first_idx]
- while idx >= first_idx:
- data = in_data.get(idx, None)
- if data is None:
- if fill_missing:
- threshold = int(round(idx - period / 2)) + 1 - period % 2
- idx_low = first_idx if threshold < first_idx else threshold
- threshold = int(round(idx + period / 2))
- idx_high = last_idx if threshold > last_idx else threshold
-
- flag_l = True
- flag_h = True
- idx_lst = list()
- inc = 1
- while flag_l or flag_h:
- if idx + inc > idx_high:
- flag_h = False
- else:
- idx_lst.append(idx + inc)
- if idx - inc < idx_low:
- flag_l = False
- else:
- idx_lst.append(idx - inc)
- inc += 1
-
- for i in idx_lst:
- if i in in_data.keys():
- data_dict[i] = in_data[i]
- break
- else:
- data_dict[idx] = data
- idx -= period
-
- return OrderedDict(sorted(data_dict.items(), key=lambda t: t[0]))
-
-
-def _evaluate_results(in_data, trimmed_data, window=10):
- """Evaluates if the sample value is regress, normal or progress compared to
- previous data within the window.
- We use the intervals defined as:
- - regress: less than median - 3 * stdev
- - normal: between median - 3 * stdev and median + 3 * stdev
- - progress: more than median + 3 * stdev
-
- :param in_data: Full data set.
- :param trimmed_data: Full data set without the outliers.
- :param window: Window size used to calculate moving median and moving stdev.
- :type in_data: pandas.Series
- :type trimmed_data: pandas.Series
- :type window: int
- :returns: Evaluated results.
- :rtype: list
- """
-
- if len(in_data) > 2:
- win_size = in_data.size if in_data.size < window else window
- results = [0.0, ] * win_size
- median = in_data.rolling(window=win_size).median()
- stdev_t = trimmed_data.rolling(window=win_size, min_periods=2).std()
- m_vals = median.values
- s_vals = stdev_t.values
- d_vals = in_data.values
- for day in range(win_size, in_data.size):
- if np.isnan(m_vals[day - 1]) or np.isnan(s_vals[day - 1]):
- results.append(0.0)
- elif d_vals[day] < (m_vals[day - 1] - 3 * s_vals[day - 1]):
- results.append(0.33)
- elif (m_vals[day - 1] - 3 * s_vals[day - 1]) <= d_vals[day] <= \
- (m_vals[day - 1] + 3 * s_vals[day - 1]):
- results.append(0.66)
- else:
- results.append(1.0)
- else:
- results = [0.0, ]
- try:
- median = np.median(in_data)
- stdev = np.std(in_data)
- if in_data.values[-1] < (median - 3 * stdev):
- results.append(0.33)
- elif (median - 3 * stdev) <= in_data.values[-1] <= (
- median + 3 * stdev):
- results.append(0.66)
- else:
- results.append(1.0)
- except TypeError:
- results.append(None)
- return results
-
-
-def _generate_trending_traces(in_data, period, moving_win_size=10,
- fill_missing=True, use_first=False,
- show_moving_median=True, name="", color=""):
+def _generate_trending_traces(in_data, build_info, moving_win_size=10,
+ show_trend_line=True, name="", color=""):
"""Generate the trending traces:
- samples,
- - moving median (trending plot)
+ - trimmed moving median (trending line)
- outliers, regress, progress
:param in_data: Full data set.
- :param period: Sampling period.
+ :param build_info: Information about the builds.
:param moving_win_size: Window size.
- :param fill_missing: If the chosen sample is missing in the full set, its
- nearest neighbour is used.
- :param use_first: Use the first sample even though it is not chosen.
- :param show_moving_median: Show moving median (trending plot).
+ :param show_trend_line: Show moving median (trending plot).
:param name: Name of the plot
:param color: Name of the color for the plot.
:type in_data: OrderedDict
- :type period: int
+ :type build_info: dict
:type moving_win_size: int
- :type fill_missing: bool
- :type use_first: bool
- :type show_moving_median: bool
+ :type show_trend_line: bool
:type name: str
:type color: str
- :returns: Generated traces (list) and the evaluated result (float).
+ :returns: Generated traces (list) and the evaluated result.
:rtype: tuple(traces, result)
"""
- if period > 1:
- in_data = _select_data(in_data, period,
- fill_missing=fill_missing,
- use_first=use_first)
+ data_x = list(in_data.keys())
+ data_y = list(in_data.values())
- data_x = [key for key in in_data.keys()]
- data_y = [val for val in in_data.values()]
- data_pd = pd.Series(data_y, index=data_x)
+ hover_text = list()
+ xaxis = list()
+ for idx in data_x:
+ hover_text.append("vpp-ref: {0}<br>csit-ref: mrr-daily-build-{1}".
+ format(build_info[str(idx)][1].rsplit('~', 1)[0],
+ idx))
+ date = build_info[str(idx)][0]
+ xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
+ int(date[9:11]), int(date[12:])))
- t_data, outliers = find_outliers(data_pd)
+ data_pd = pd.Series(data_y, index=xaxis)
- results = _evaluate_results(data_pd, t_data, window=moving_win_size)
+ t_data, outliers = split_outliers(data_pd, outlier_const=1.5,
+ window=moving_win_size)
+ anomaly_classification = classify_anomalies(t_data, window=moving_win_size)
anomalies = pd.Series()
- anomalies_res = list()
- for idx, item in enumerate(in_data.items()):
- item_pd = pd.Series([item[1], ], index=[item[0], ])
- if item[0] in outliers.keys():
- anomalies = anomalies.append(item_pd)
- anomalies_res.append(0.0)
- elif results[idx] in (0.33, 1.0):
- anomalies = anomalies.append(item_pd)
- anomalies_res.append(results[idx])
- anomalies_res.extend([0.0, 0.33, 0.66, 1.0])
+ anomalies_colors = list()
+ anomaly_color = {
+ "outlier": 0.0,
+ "regression": 0.33,
+ "normal": 0.66,
+ "progression": 1.0
+ }
+ if anomaly_classification:
+ for idx, item in enumerate(data_pd.items()):
+ if anomaly_classification[idx] in \
+ ("outlier", "regression", "progression"):
+ anomalies = anomalies.append(pd.Series([item[1], ],
+ index=[item[0], ]))
+ anomalies_colors.append(
+ anomaly_color[anomaly_classification[idx]])
+ anomalies_colors.extend([0.0, 0.33, 0.66, 1.0])
# Create traces
- color_scale = [[0.00, "grey"],
- [0.25, "grey"],
- [0.25, "red"],
- [0.50, "red"],
- [0.50, "white"],
- [0.75, "white"],
- [0.75, "green"],
- [1.00, "green"]]
trace_samples = plgo.Scatter(
- x=data_x,
+ x=xaxis,
y=data_y,
mode='markers',
line={
"width": 1
},
+ legendgroup=name,
name="{name}-thput".format(name=name),
marker={
"size": 5,
"color": color,
"symbol": "circle",
},
+ text=hover_text,
+ hoverinfo="x+y+text+name"
)
traces = [trace_samples, ]
@@ -282,14 +173,21 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
y=anomalies.values,
mode='markers',
hoverinfo="none",
- showlegend=False,
+ showlegend=True,
legendgroup=name,
- name="{name}: outliers".format(name=name),
+ name="{name}-anomalies".format(name=name),
marker={
"size": 15,
"symbol": "circle-open",
- "color": anomalies_res,
- "colorscale": color_scale,
+ "color": anomalies_colors,
+ "colorscale": [[0.00, "grey"],
+ [0.25, "grey"],
+ [0.25, "red"],
+ [0.50, "red"],
+ [0.50, "white"],
+ [0.75, "white"],
+ [0.75, "green"],
+ [1.00, "green"]],
"showscale": True,
"line": {
"width": 2
@@ -314,43 +212,24 @@ def _generate_trending_traces(in_data, period, moving_win_size=10,
)
traces.append(trace_anomalies)
- if show_moving_median:
- data_mean_y = pd.Series(data_y).rolling(
- window=moving_win_size, min_periods=2).median()
- trace_median = plgo.Scatter(
- x=data_x,
- y=data_mean_y,
+ if show_trend_line:
+ data_trend = t_data.rolling(window=moving_win_size,
+ min_periods=2).median()
+ trace_trend = plgo.Scatter(
+ x=data_trend.keys(),
+ y=data_trend.tolist(),
mode='lines',
line={
"shape": "spline",
"width": 1,
"color": color,
},
+ legendgroup=name,
name='{name}-trend'.format(name=name)
)
- traces.append(trace_median)
-
- return traces, results[-1]
+ traces.append(trace_trend)
-
-def _generate_chart(traces, layout, file_name):
- """Generates the whole chart using pre-generated traces.
-
- :param traces: Traces for the chart.
- :param layout: Layout of the chart.
- :param file_name: File name for the generated chart.
- :type traces: list
- :type layout: dict
- :type file_name: str
- """
-
- # Create plot
- logging.info(" Writing the file '{0}' ...".format(file_name))
- plpl = plgo.Figure(data=traces, layout=layout)
- try:
- ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
- except plerr.PlotlyEmptyDataError:
- logging.warning(" No data for the plot. Skipped.")
+ return traces, anomaly_classification[-1]
def _generate_all_charts(spec, input_data):
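split_outliers(data_pd, outlier_const=1.5, window=moving_win_size) is called above but defined in utils.py, outside this diff. The outlier_const=1.5 argument suggests a Tukey-style interquartile fence; the sketch below is an assumption of that behaviour, not the actual implementation:

# Assumed behaviour of utils.split_outliers(); the real helper is not in this diff.
import pandas as pd

def split_outliers_sketch(series, outlier_const=1.5, window=14):
    """Split samples into trimmed data and outliers using an IQR fence.

    :param series: Input samples indexed by build or date.
    :param outlier_const: Multiplier of the interquartile range.
    :param window: Number of trailing samples used to compute the fence.
    :type series: pandas.Series
    :type outlier_const: float
    :type window: int
    :returns: Trimmed series and the outliers that were removed.
    :rtype: tuple of pandas.Series
    """
    tail = series.tail(window)
    q_1, q_3 = tail.quantile(0.25), tail.quantile(0.75)
    iqr = q_3 - q_1
    low, high = q_1 - outlier_const * iqr, q_3 + outlier_const * iqr
    trimmed = series[(series >= low) & (series <= high)]
    outliers = series[(series < low) | (series > high)]
    return trimmed, outliers

if __name__ == "__main__":
    # Hypothetical throughput samples in pps, indexed by build number.
    data = pd.Series([9.9e6, 10.1e6, 3.2e6, 10.0e6], index=[101, 102, 103, 104])
    print(split_outliers_sketch(data, window=4))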
@@ -362,50 +241,38 @@ def _generate_all_charts(spec, input_data):
:type input_data: InputData
"""
- job_name = spec.cpta["data"].keys()[0]
+ def _generate_chart(_, data_q, graph):
+ """Generates the chart.
+ """
- builds_lst = list()
- for build in spec.input["builds"][job_name]:
- status = build["status"]
- if status != "failed" and status != "not found":
- builds_lst.append(str(build["build"]))
- print(builds_lst)
+ logs = list()
- # Get "build ID": "date" dict:
- build_dates = dict()
- for build in builds_lst:
- try:
- build_dates[build] = \
- input_data.metadata(job_name, build)["generated"][:14]
- except KeyError:
- pass
+ logging.info(" Generating the chart '{0}' ...".
+ format(graph.get("title", "")))
+ logs.append(("INFO", " Generating the chart '{0}' ...".
+ format(graph.get("title", ""))))
- # Create the header:
- csv_table = list()
- header = "Build Number:," + ",".join(builds_lst) + '\n'
- csv_table.append(header)
- header = "Build Date:," + ",".join(build_dates.values()) + '\n'
- csv_table.append(header)
+ job_name = spec.cpta["data"].keys()[0]
- results = list()
- for chart in spec.cpta["plots"]:
- logging.info(" Generating the chart '{0}' ...".
- format(chart.get("title", "")))
+ csv_tbl = list()
+ res = list()
# Transform the data
- data = input_data.filter_data(chart, continue_on_error=True)
+ logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
+ format(graph.get("type", ""), graph.get("title", ""))))
+ data = input_data.filter_data(graph, continue_on_error=True)
if data is None:
logging.error("No data.")
return
chart_data = dict()
for job in data:
- for idx, build in job.items():
- for test_name, test in build.items():
+ for index, bld in job.items():
+ for test_name, test in bld.items():
if chart_data.get(test_name, None) is None:
chart_data[test_name] = OrderedDict()
try:
- chart_data[test_name][int(idx)] = \
+ chart_data[test_name][int(index)] = \
test["result"]["throughput"]
except (KeyError, TypeError):
pass
@@ -413,46 +280,130 @@ def _generate_all_charts(spec, input_data):
# Add items to the csv table:
for tst_name, tst_data in chart_data.items():
tst_lst = list()
- for build in builds_lst:
- item = tst_data.get(int(build), '')
- tst_lst.append(str(item) if item else '')
- csv_table.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
-
- for period in chart["periods"]:
- # Generate traces:
- traces = list()
- win_size = 10 if period == 1 else 5 if period < 20 else 3
- idx = 0
- for test_name, test_data in chart_data.items():
- if not test_data:
- logging.warning("No data for the test '{0}'".
- format(test_name))
- continue
- test_name = test_name.split('.')[-1]
- trace, result = _generate_trending_traces(
- test_data,
- period=period,
- moving_win_size=win_size,
- fill_missing=True,
- use_first=False,
- name='-'.join(test_name.split('-')[3:-1]),
- color=COLORS[idx])
- traces.extend(trace)
- results.append(result)
- idx += 1
-
+ for bld in builds_lst:
+ itm = tst_data.get(int(bld), '')
+ tst_lst.append(str(itm))
+ csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
+ # Generate traces:
+ traces = list()
+ win_size = 14
+ index = 0
+ for test_name, test_data in chart_data.items():
+ if not test_data:
+ logs.append(("WARNING", "No data for the test '{0}'".
+ format(test_name)))
+ continue
+ test_name = test_name.split('.')[-1]
+ trace, rslt = _generate_trending_traces(
+ test_data,
+ build_info=build_info,
+ moving_win_size=win_size,
+ name='-'.join(test_name.split('-')[3:-1]),
+ color=COLORS[index])
+ traces.extend(trace)
+ res.append(rslt)
+ index += 1
+
+ if traces:
# Generate the chart:
- chart["layout"]["xaxis"]["title"] = \
- chart["layout"]["xaxis"]["title"].format(job=job_name)
- _generate_chart(traces,
- chart["layout"],
- file_name="{0}-{1}-{2}{3}".format(
- spec.cpta["output-file"],
- chart["output-file-name"],
- period,
- spec.cpta["output-file-type"]))
-
- logging.info(" Done.")
+ graph["layout"]["xaxis"]["title"] = \
+ graph["layout"]["xaxis"]["title"].format(job=job_name)
+ name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
+ graph["output-file-name"],
+ spec.cpta["output-file-type"])
+
+ logs.append(("INFO", " Writing the file '{0}' ...".
+ format(name_file)))
+ plpl = plgo.Figure(data=traces, layout=graph["layout"])
+ try:
+ ploff.plot(plpl, show_link=False, auto_open=False,
+ filename=name_file)
+ except plerr.PlotlyEmptyDataError:
+ logs.append(("WARNING", "No data for the plot. Skipped."))
+
+ data_out = {
+ "csv_table": csv_tbl,
+ "results": res,
+ "logs": logs
+ }
+ data_q.put(data_out)
+
+ job_name = spec.cpta["data"].keys()[0]
+
+ builds_lst = list()
+ for build in spec.input["builds"][job_name]:
+ status = build["status"]
+ if status != "failed" and status != "not found":
+ builds_lst.append(str(build["build"]))
+
+ # Get "build ID": "date" dict:
+ build_info = OrderedDict()
+ for build in builds_lst:
+ try:
+ build_info[build] = (
+ input_data.metadata(job_name, build)["generated"][:14],
+ input_data.metadata(job_name, build)["version"]
+ )
+ except KeyError:
+ build_info[build] = ("", "")
+
+ work_queue = multiprocessing.JoinableQueue()
+ manager = multiprocessing.Manager()
+ data_queue = manager.Queue()
+ cpus = multiprocessing.cpu_count()
+
+ workers = list()
+ for cpu in range(cpus):
+ worker = Worker(work_queue,
+ data_queue,
+ _generate_chart)
+ worker.daemon = True
+ worker.start()
+ workers.append(worker)
+ os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+ format(cpu, worker.pid))
+
+ for chart in spec.cpta["plots"]:
+ work_queue.put((chart, ))
+ work_queue.join()
+
+ anomaly_classifications = list()
+
+ # Create the header:
+ csv_table = list()
+ header = "Build Number:," + ",".join(builds_lst) + '\n'
+ csv_table.append(header)
+ build_dates = [x[0] for x in build_info.values()]
+ header = "Build Date:," + ",".join(build_dates) + '\n'
+ csv_table.append(header)
+ vpp_versions = [x[1] for x in build_info.values()]
+ header = "VPP Version:," + ",".join(vpp_versions) + '\n'
+ csv_table.append(header)
+
+ while not data_queue.empty():
+ result = data_queue.get()
+
+ anomaly_classifications.extend(result["results"])
+ csv_table.extend(result["csv_table"])
+
+ for item in result["logs"]:
+ if item[0] == "INFO":
+ logging.info(item[1])
+ elif item[0] == "ERROR":
+ logging.error(item[1])
+ elif item[0] == "DEBUG":
+ logging.debug(item[1])
+ elif item[0] == "CRITICAL":
+ logging.critical(item[1])
+ elif item[0] == "WARNING":
+ logging.warning(item[1])
+
+ del data_queue
+
+ # Terminate all workers
+ for worker in workers:
+ worker.terminate()
+ worker.join()
# Write the tables:
file_name = spec.cpta["output-file"] + "-trending"
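The chart workers instantiated above come from the Worker class in utils.py, which this diff does not show. A minimal sketch, assuming the worker simply consumes argument tuples from the joinable work queue and hands the shared data queue to the supplied function (matching the _generate_chart(_, data_q, graph) signature and the work_queue.put((chart, )) calls above):

# Minimal sketch of the Worker pattern used above; the real class is in utils.py.
import multiprocessing

class Worker(multiprocessing.Process):
    """Process consuming jobs from a JoinableQueue and running a function."""

    def __init__(self, work_queue, data_queue, func):
        super(Worker, self).__init__()
        self._work_queue = work_queue   # JoinableQueue with argument tuples
        self._data_queue = data_queue   # Manager().Queue() collecting results and logs
        self._func = func               # e.g. the _generate_chart closure

    def run(self):
        # The callers daemonize and later terminate the workers, so an
        # endless consume loop is intentional here.
        while True:
            try:
                args = self._work_queue.get()
                self._func(self.pid, self._data_queue, *args)
            finally:
                self._work_queue.task_done()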
@@ -473,24 +424,27 @@ def _generate_all_charts(spec, input_data):
row[idx] = str(round(float(item) / 1000000, 2))
except ValueError:
pass
- txt_table.add_row(row)
+ try:
+ txt_table.add_row(row)
+ except Exception as err:
+ logging.warning("Error occurred while generating TXT table:"
+ "\n{0}".format(err))
line_nr += 1
txt_table.align["Build Number:"] = "l"
with open("{0}.txt".format(file_name), "w") as txt_file:
txt_file.write(str(txt_table))
# Evaluate result:
- result = "PASS"
- for item in results:
- if item is None:
- result = "FAIL"
- break
- if item == 0.66 and result == "PASS":
- result = "PASS"
- elif item == 0.33 or item == 0.0:
- result = "FAIL"
-
- logging.info("Partial results: {0}".format(results))
+ if anomaly_classifications:
+ result = "PASS"
+ for classification in anomaly_classifications:
+ if classification == "regression" or classification == "outlier":
+ result = "FAIL"
+ break
+ else:
+ result = "FAIL"
+
+ logging.info("Partial results: {0}".format(anomaly_classifications))
logging.info("Result: {0}".format(result))
return result
diff --git a/resources/tools/presentation/generator_files.py b/resources/tools/presentation/generator_files.py
index 1cd1b6dfbb..e717815cd0 100644
--- a/resources/tools/presentation/generator_files.py
+++ b/resources/tools/presentation/generator_files.py
@@ -141,9 +141,13 @@ def file_merged_test_results(file_spec, input_data):
logging.info(" Writing file '{0}'".format(file_name))
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(file_spec.get("type", ""), file_spec.get("title", "")))
tests = input_data.filter_data(file_spec)
tests = input_data.merge_data(tests)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(file_spec.get("type", ""), file_spec.get("title", "")))
suites = input_data.filter_data(file_spec, data_set="suites")
suites = input_data.merge_data(suites)
suites.sort_index(inplace=True)
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index b7fd420aa2..6faf4c3935 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -59,6 +59,8 @@ def plot_performance_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -129,6 +131,8 @@ def plot_latency_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -236,6 +240,8 @@ def plot_throughput_speedup_analysis(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
@@ -335,6 +341,8 @@ def plot_http_server_performance_box(plot, input_data):
format(plot.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot.get("title", "")))
data = input_data.filter_data(plot)
if data is None:
logging.error("No data.")
diff --git a/resources/tools/presentation/generator_report.py b/resources/tools/presentation/generator_report.py
index 6819f350b6..07103dbb1f 100644
--- a/resources/tools/presentation/generator_report.py
+++ b/resources/tools/presentation/generator_report.py
@@ -103,7 +103,7 @@ def generate_html_report(release, spec, versions):
cmd = HTML_BUILDER.format(
release=release,
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
execute_command(cmd)
@@ -148,7 +148,7 @@ def generate_pdf_report(release, spec, versions):
build_dir = spec.environment["paths"]["DIR[BUILD,LATEX]"]
cmd = PDF_BUILDER.format(
release=release,
- date=datetime.date.today().strftime('%d-%b-%Y'),
+ date=datetime.datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
build_dir=build_dir)
execute_command(cmd)
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index a667fffb16..8791ae5804 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -18,11 +18,16 @@
import logging
import csv
import prettytable
+import pandas as pd
from string import replace
+from collections import OrderedDict
+from numpy import nan, isnan
+from xml.etree import ElementTree as ET
from errors import PresentationError
-from utils import mean, stdev, relative_change, remove_outliers
+from utils import mean, stdev, relative_change, remove_outliers,\
+ split_outliers, classify_anomalies
def generate_tables(spec, data):
@@ -58,6 +63,8 @@ def table_details(table, input_data):
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
@@ -124,10 +131,14 @@ def table_merged_details(table, input_data):
format(table.get("title", "")))
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
data = input_data.merge_data(data)
data.sort_index(inplace=True)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
suites = input_data.filter_data(table, data_set="suites")
suites = input_data.merge_data(suites)
@@ -221,6 +232,8 @@ def table_performance_improvements(table, input_data):
return None
# Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
data = input_data.filter_data(table)
# Prepare the header of the tables
@@ -352,16 +365,26 @@ def table_performance_comparison(table, input_data):
format(table.get("title", "")))
# Transform the data
- data = input_data.filter_data(table)
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
# Prepare the header of the tables
try:
- header = ["Test case",
- "{0} Throughput [Mpps]".format(table["reference"]["title"]),
- "{0} stdev [Mpps]".format(table["reference"]["title"]),
- "{0} Throughput [Mpps]".format(table["compare"]["title"]),
- "{0} stdev [Mpps]".format(table["compare"]["title"]),
- "Change [%]"]
+ header = ["Test case", ]
+
+ history = table.get("history", None)
+ if history:
+ for item in history:
+ header.extend(
+ ["{0} Throughput [Mpps]".format(item["title"]),
+ "{0} Stdev [Mpps]".format(item["title"])])
+ header.extend(
+ ["{0} Throughput [Mpps]".format(table["reference"]["title"]),
+ "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+ "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+ "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+ "Change [%]"])
header_str = ",".join(header) + "\n"
except (AttributeError, KeyError) as err:
logging.error("The model is invalid, missing parameter: {0}".
@@ -396,27 +419,68 @@ def table_performance_comparison(table, input_data):
pass
except TypeError:
tbl_dict.pop(tst_name, None)
+ if history:
+ for item in history:
+ for job, builds in item["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ continue
+ if tbl_dict[tst_name].get("history", None) is None:
+ tbl_dict[tst_name]["history"] = OrderedDict()
+ if tbl_dict[tst_name]["history"].get(item["title"],
+ None) is None:
+ tbl_dict[tst_name]["history"][item["title"]] = \
+ list()
+ try:
+ tbl_dict[tst_name]["history"][item["title"]].\
+ append(tst_data["throughput"]["value"])
+ except (TypeError, KeyError):
+ pass
tbl_lst = list()
for tst_name in tbl_dict.keys():
item = [tbl_dict[tst_name]["name"], ]
+ if history:
+ if tbl_dict[tst_name].get("history", None) is not None:
+ for hist_data in tbl_dict[tst_name]["history"].values():
+ if hist_data:
+ data_t = remove_outliers(
+ hist_data, outlier_const=table["outlier-const"])
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
if tbl_dict[tst_name]["ref-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
if tbl_dict[tst_name]["cmp-data"]:
data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
- table["outlier-const"])
- item.append(round(mean(data_t) / 1000000, 2))
- item.append(round(stdev(data_t) / 1000000, 2))
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
else:
item.extend([None, None])
- if item[1] is not None and item[3] is not None:
- item.append(int(relative_change(float(item[1]), float(item[3]))))
- if len(item) == 6:
+ if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+ item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+ if len(item) == len(header):
tbl_lst.append(item)
# Sort the table according to the relative change
@@ -438,7 +502,7 @@ def table_performance_comparison(table, input_data):
table["output-file-ext"])
]
for file_name in tbl_names:
- logging.info(" Writing file: '{}'".format(file_name))
+ logging.info(" Writing file: '{0}'".format(file_name))
with open(file_name, "w") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
@@ -459,7 +523,7 @@ def table_performance_comparison(table, input_data):
for i, txt_name in enumerate(tbl_names_txt):
txt_table = None
- logging.info(" Writing file: '{}'".format(txt_name))
+ logging.info(" Writing file: '{0}'".format(txt_name))
with open(tbl_names[i], 'rb') as csv_file:
csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
for row in csv_content:
@@ -481,7 +545,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-ndr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
@@ -491,7 +555,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-ndr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
@@ -508,7 +572,7 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-pdr-1t1c-top{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[1:]):
@@ -518,10 +582,424 @@ def table_performance_comparison(table, input_data):
output_file = "{0}-pdr-1t1c-bottom{1}".format(table["output-file"],
table["output-file-ext"])
- logging.info(" Writing file: '{}'".format(output_file))
+ logging.info(" Writing file: '{0}'".format(output_file))
with open(output_file, "w") as out_file:
out_file.write(header_str)
for i, line in enumerate(lines[-1:0:-1]):
if i == table["nr-of-tests-shown"]:
break
out_file.write(line)
+
+
+def table_performance_comparison_mrr(table, input_data):
+ """Generate the table(s) with algorithm: table_performance_comparison_mrr
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ try:
+ header = ["Test case",
+ "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+ "{0} stdev [Mpps]".format(table["reference"]["title"]),
+ "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+ "{0} stdev [Mpps]".format(table["compare"]["title"]),
+ "Change [%]"]
+ header_str = ",".join(header) + "\n"
+ except (AttributeError, KeyError) as err:
+ logging.error("The model is invalid, missing parameter: {0}".
+ format(err))
+ return
+
+ # Prepare data to the table:
+ tbl_dict = dict()
+ for job, builds in table["reference"]["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "ref-data": list(),
+ "cmp-data": list()}
+ try:
+ tbl_dict[tst_name]["ref-data"].\
+ append(tst_data["result"]["throughput"])
+ except TypeError:
+ pass # No data in output.xml for this test
+
+ for job, builds in table["compare"]["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ try:
+ tbl_dict[tst_name]["cmp-data"].\
+ append(tst_data["result"]["throughput"])
+ except KeyError:
+ pass
+ except TypeError:
+ tbl_dict.pop(tst_name, None)
+
+ tbl_lst = list()
+ for tst_name in tbl_dict.keys():
+ item = [tbl_dict[tst_name]["name"], ]
+ if tbl_dict[tst_name]["ref-data"]:
+ data_t = remove_outliers(tbl_dict[tst_name]["ref-data"],
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ if tbl_dict[tst_name]["cmp-data"]:
+ data_t = remove_outliers(tbl_dict[tst_name]["cmp-data"],
+ outlier_const=table["outlier-const"])
+ # TODO: Specify window size.
+ if data_t:
+ item.append(round(mean(data_t) / 1000000, 2))
+ item.append(round(stdev(data_t) / 1000000, 2))
+ else:
+ item.extend([None, None])
+ else:
+ item.extend([None, None])
+ if item[1] is not None and item[3] is not None and item[1] != 0:
+ item.append(int(relative_change(float(item[1]), float(item[3]))))
+ if len(item) == 6:
+ tbl_lst.append(item)
+
+ # Sort the table according to the relative change
+ tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+ # Generate tables:
+ # All tests in csv:
+ tbl_names = ["{0}-1t1c-full{1}".format(table["output-file"],
+ table["output-file-ext"]),
+ "{0}-2t2c-full{1}".format(table["output-file"],
+ table["output-file-ext"]),
+ "{0}-4t4c-full{1}".format(table["output-file"],
+ table["output-file-ext"])
+ ]
+ for file_name in tbl_names:
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_lst:
+ if file_name.split("-")[-2] in test[0]: # cores
+ test[0] = "-".join(test[0].split("-")[:-1])
+ file_handler.write(",".join([str(item) for item in test]) +
+ "\n")
+
+ # All tests in txt:
+ tbl_names_txt = ["{0}-1t1c-full.txt".format(table["output-file"]),
+ "{0}-2t2c-full.txt".format(table["output-file"]),
+ "{0}-4t4c-full.txt".format(table["output-file"])
+ ]
+
+ for i, txt_name in enumerate(tbl_names_txt):
+ txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_name))
+ with open(tbl_names[i], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ with open(txt_name, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
+def table_performance_trending_dashboard(table, input_data):
+ """Generate the table(s) with algorithm: table_performance_comparison
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ # Transform the data
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(table.get("type", ""), table.get("title", "")))
+ data = input_data.filter_data(table, continue_on_error=True)
+
+ # Prepare the header of the tables
+ header = ["Test Case",
+ "Trend [Mpps]",
+ "Short-Term Change [%]",
+ "Long-Term Change [%]",
+ "Regressions [#]",
+ "Progressions [#]",
+ "Outliers [#]"
+ ]
+ header_str = ",".join(header) + "\n"
+
+ # Prepare data to the table:
+ tbl_dict = dict()
+ for job, builds in table["data"].items():
+ for build in builds:
+ for tst_name, tst_data in data[job][str(build)].iteritems():
+ if tst_name.lower() in table["ignore-list"]:
+ continue
+ if tbl_dict.get(tst_name, None) is None:
+ name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
+ "-".join(tst_data["name"].
+ split("-")[1:]))
+ tbl_dict[tst_name] = {"name": name,
+ "data": OrderedDict()}
+ try:
+ tbl_dict[tst_name]["data"][str(build)] = \
+ tst_data["result"]["throughput"]
+ except (TypeError, KeyError):
+ pass # No data in output.xml for this test
+
+ tbl_lst = list()
+ for tst_name in tbl_dict.keys():
+ if len(tbl_dict[tst_name]["data"]) < 3:
+ continue
+
+ pd_data = pd.Series(tbl_dict[tst_name]["data"])
+ data_t, _ = split_outliers(pd_data, outlier_const=1.5,
+ window=table["window"])
+ last_key = data_t.keys()[-1]
+ win_size = min(data_t.size, table["window"])
+ win_first_idx = data_t.size - win_size
+ key_14 = data_t.keys()[win_first_idx]
+ long_win_size = min(data_t.size, table["long-trend-window"])
+ median_t = data_t.rolling(window=win_size, min_periods=2).median()
+ median_first_idx = median_t.size - long_win_size
+ try:
+ max_median = max(
+ [x for x in median_t.values[median_first_idx:-win_size]
+ if not isnan(x)])
+ except ValueError:
+ max_median = nan
+ try:
+ last_median_t = median_t[last_key]
+ except KeyError:
+ last_median_t = nan
+ try:
+ median_t_14 = median_t[key_14]
+ except KeyError:
+ median_t_14 = nan
+
+ if isnan(last_median_t) or isnan(median_t_14) or median_t_14 == 0.0:
+ rel_change_last = nan
+ else:
+ rel_change_last = round(
+ ((last_median_t - median_t_14) / median_t_14) * 100, 2)
+
+ if isnan(max_median) or isnan(last_median_t) or max_median == 0.0:
+ rel_change_long = nan
+ else:
+ rel_change_long = round(
+ ((last_median_t - max_median) / max_median) * 100, 2)
+
+ # Classification list:
+ classification_lst = classify_anomalies(data_t, window=14)
+
+ if classification_lst:
+ tbl_lst.append(
+ [tbl_dict[tst_name]["name"],
+ '-' if isnan(last_median_t) else
+ round(last_median_t / 1000000, 2),
+ '-' if isnan(rel_change_last) else rel_change_last,
+ '-' if isnan(rel_change_long) else rel_change_long,
+ classification_lst[win_first_idx:].count("regression"),
+ classification_lst[win_first_idx:].count("progression"),
+ classification_lst[win_first_idx:].count("outlier")])
+
+ tbl_lst.sort(key=lambda rel: rel[0])
+
+ tbl_sorted = list()
+ for nrr in range(table["window"], -1, -1):
+ tbl_reg = [item for item in tbl_lst if item[4] == nrr]
+ for nrp in range(table["window"], -1, -1):
+ tbl_pro = [item for item in tbl_reg if item[5] == nrp]
+ for nro in range(table["window"], -1, -1):
+ tbl_out = [item for item in tbl_pro if item[6] == nro]
+ tbl_out.sort(key=lambda rel: rel[2])
+ tbl_sorted.extend(tbl_out)
+
+ file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+
+ logging.info(" Writing file: '{0}'".format(file_name))
+ with open(file_name, "w") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_sorted:
+ file_handler.write(",".join([str(item) for item in test]) + '\n')
+
+ txt_file_name = "{0}.txt".format(table["output-file"])
+ txt_table = None
+ logging.info(" Writing file: '{0}'".format(txt_file_name))
+ with open(file_name, 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Test case"] = "l"
+ with open(txt_file_name, "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+
+def table_performance_trending_dashboard_html(table, input_data):
+ """Generate the table(s) with algorithm:
+ table_performance_trending_dashboard_html specified in the specification
+ file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+
+ logging.info(" Generating the table {0} ...".
+ format(table.get("title", "")))
+
+ try:
+ with open(table["input-file"], 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ csv_lst = [item for item in csv_content]
+ except KeyError:
+ logging.warning("The input file is not defined.")
+ return
+ except csv.Error as err:
+ logging.warning("Not possible to process the file '{0}'.\n{1}".
+ format(table["input-file"], err))
+ return
+
+ # Table:
+ dashboard = ET.Element("table", attrib=dict(width="100%", border='0'))
+
+ # Table header:
+ tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor="#7eade7"))
+ for idx, item in enumerate(csv_lst[0]):
+ alignment = "left" if idx == 0 else "center"
+ th = ET.SubElement(tr, "th", attrib=dict(align=alignment))
+ th.text = item
+
+ # Rows:
+ colors = {"regression": ("#ffcccc", "#ff9999"),
+ "progression": ("#c6ecc6", "#9fdf9f"),
+ "outlier": ("#e6e6e6", "#cccccc"),
+ "normal": ("#e9f1fb", "#d4e4f7")}
+ for r_idx, row in enumerate(csv_lst[1:]):
+ if int(row[4]):
+ color = "regression"
+ elif int(row[5]):
+ color = "progression"
+ elif int(row[6]):
+ color = "outlier"
+ else:
+ color = "normal"
+ background = colors[color][r_idx % 2]
+ tr = ET.SubElement(dashboard, "tr", attrib=dict(bgcolor=background))
+
+ # Columns:
+ for c_idx, item in enumerate(row):
+ alignment = "left" if c_idx == 0 else "center"
+ td = ET.SubElement(tr, "td", attrib=dict(align=alignment))
+ # Name:
+ url = "../trending/"
+ file_name = ""
+ anchor = "#"
+ feature = ""
+ if c_idx == 0:
+ if "memif" in item:
+ file_name = "container_memif.html"
+
+ elif "srv6" in item:
+ file_name = "srv6.html"
+
+ elif "vhost" in item:
+ if "l2xcbase" in item or "l2bdbasemaclrn" in item:
+ file_name = "vm_vhost_l2.html"
+ elif "ip4base" in item:
+ file_name = "vm_vhost_ip4.html"
+
+ elif "ipsec" in item:
+ file_name = "ipsec.html"
+
+ elif "ethip4lispip" in item or "ethip4vxlan" in item:
+ file_name = "ip4_tunnels.html"
+
+ elif "ip4base" in item or "ip4scale" in item:
+ file_name = "ip4.html"
+ if "iacl" in item or "snat" in item or "cop" in item:
+ feature = "-features"
+
+ elif "ip6base" in item or "ip6scale" in item:
+ file_name = "ip6.html"
+
+ elif "l2xcbase" in item or "l2xcscale" in item \
+ or "l2bdbasemaclrn" in item or "l2bdscale" in item \
+ or "l2dbbasemaclrn" in item or "l2dbscale" in item:
+ file_name = "l2.html"
+ if "iacl" in item:
+ feature = "-features"
+
+ if "x520" in item:
+ anchor += "x520-"
+ elif "x710" in item:
+ anchor += "x710-"
+ elif "xl710" in item:
+ anchor += "xl710-"
+
+ if "64b" in item:
+ anchor += "64b-"
+ elif "78b" in item:
+ anchor += "78b-"
+ elif "imix" in item:
+ anchor += "imix-"
+ elif "9000b" in item:
+ anchor += "9000b-"
+ elif "1518" in item:
+ anchor += "1518b-"
+
+ if "1t1c" in item:
+ anchor += "1t1c"
+ elif "2t2c" in item:
+ anchor += "2t2c"
+ elif "4t4c" in item:
+ anchor += "4t4c"
+
+ url = url + file_name + anchor + feature
+
+ ref = ET.SubElement(td, "a", attrib=dict(href=url))
+ ref.text = item
+
+ if c_idx > 0:
+ td.text = item
+
+ try:
+ with open(table["output-file"], 'w') as html_file:
+ logging.info(" Writing file: '{0}'".
+ format(table["output-file"]))
+ html_file.write(".. raw:: html\n\n\t")
+ html_file.write(ET.tostring(dashboard))
+ html_file.write("\n\t<p><br><br></p>\n")
+ except KeyError:
+ logging.warning("The output file is not defined.")
+ return
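The comparison and dashboard tables above lean on mean, stdev, relative_change and remove_outliers from utils.py, which are outside this diff. The guard item[-4] != 0 before computing "Change [%]" points to a plain percentage change against the reference mean; a hedged sketch of that helper, not necessarily the actual implementation:

# Assumed shape of utils.relative_change(); the real helper is not shown here.
def relative_change(nr_1, nr_2):
    """Relative change of nr_2 against nr_1, in percent.

    :param nr_1: Reference value (e.g. mean throughput of the reference build).
    :param nr_2: Compared value (e.g. mean throughput of the compared build).
    :type nr_1: float
    :type nr_2: float
    :returns: Relative change in percent.
    :rtype: float
    """
    return float(((nr_2 - nr_1) / nr_1) * 100)

# Example: reference 9.87 Mpps vs. 10.21 Mpps gives roughly +3 %.
print(int(relative_change(9.87, 10.21)))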
diff --git a/resources/tools/presentation/input_data_files.py b/resources/tools/presentation/input_data_files.py
index d81f64fbe6..cde6d1acc4 100644
--- a/resources/tools/presentation/input_data_files.py
+++ b/resources/tools/presentation/input_data_files.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -16,12 +16,9 @@ Download all data.
"""
import re
-import gzip
-import logging
-from os import rename, remove
-from os.path import join, getsize
-from shutil import move
+from os import rename, mkdir
+from os.path import join
from zipfile import ZipFile, is_zipfile, BadZipfile
from httplib import responses
from requests import get, codes, RequestException, Timeout, TooManyRedirects, \
@@ -39,216 +36,195 @@ SEPARATOR = "__"
REGEX_RELEASE = re.compile(r'(\D*)(\d{4}|master)(\D*)')
-def download_data_files(spec):
- """Download all data specified in the specification file in the section
- type: input --> builds.
+def _download_file(url, file_name, log):
+ """Download a file with input data.
- :param spec: Specification.
+ :param url: URL to the file to download.
+ :param file_name: Name of file to download.
+ :param log: List of log messages.
+ :type url: str
+ :type file_name: str
+ :type log: list of tuples (severity, msg)
+ :returns: True if the download was successful, otherwise False.
+ :rtype: bool
+ """
+
+ success = False
+ try:
+ log.append(("INFO", " Connecting to '{0}' ...".format(url)))
+
+ response = get(url, stream=True)
+ code = response.status_code
+
+ log.append(("INFO", " {0}: {1}".format(code, responses[code])))
+
+ if code != codes["OK"]:
+ return False
+
+ log.append(("INFO", " Downloading the file '{0}' to '{1}' ...".
+ format(url, file_name)))
+
+ file_handle = open(file_name, "wb")
+ for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
+ if chunk:
+ file_handle.write(chunk)
+ file_handle.close()
+ success = True
+ except ConnectionError as err:
+ log.append(("ERROR", "Not possible to connect to '{0}'.".format(url)))
+ log.append(("DEBUG", str(err)))
+ except HTTPError as err:
+ log.append(("ERROR", "Invalid HTTP response from '{0}'.".format(url)))
+ log.append(("DEBUG", str(err)))
+ except TooManyRedirects as err:
+ log.append(("ERROR", "Request exceeded the configured number "
+ "of maximum re-directions."))
+ log.append(("DEBUG", str(err)))
+ except Timeout as err:
+ log.append(("ERROR", "Request timed out."))
+ log.append(("DEBUG", str(err)))
+ except RequestException as err:
+ log.append(("ERROR", "Unexpected HTTP request exception."))
+ log.append(("DEBUG", str(err)))
+ except (IOError, ValueError, KeyError) as err:
+ log.append(("ERROR", "Download failed."))
+ log.append(("DEBUG", str(err)))
+
+ log.append(("INFO", " Download finished."))
+ return success
+
+
+def _unzip_file(spec, build, pid, log):
+ """Unzip downloaded source file.
+
+ :param spec: Specification read from the specification file.
+ :param build: Information about the build.
+ :param pid: PID of the process executing this method.
+ :param log: List of log messages.
:type spec: Specification
- :raises: PresentationError if there is no url defined for the job.
+ :type build: dict
+ :type pid: int
+ :type log: list of tuples (severity, msg)
+ :returns: True if the unzipping was successful, otherwise False.
+ :rtype: bool
"""
- for job, builds in spec.builds.items():
- for build in builds:
- if job.startswith("csit-"):
- if spec.input["file-name"].endswith(".zip"):
- url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
- elif spec.input["file-name"].endswith(".gz"):
- url = spec.environment["urls"]["URL[NEXUS,LOG]"]
- else:
- logging.error("Not supported file format.")
- continue
- elif job.startswith("hc2vpp-"):
- url = spec.environment["urls"]["URL[JENKINS,HC]"]
- else:
- raise PresentationError("No url defined for the job '{}'.".
- format(job))
- file_name = spec.input["file-name"]
- full_name = spec.input["download-path"].\
- format(job=job, build=build["build"], filename=file_name)
- url = "{0}/{1}".format(url, full_name)
- new_name = join(
- spec.environment["paths"]["DIR[WORKING,DATA]"],
- "{job}{sep}{build}{sep}{name}".format(job=job,
- sep=SEPARATOR,
- build=build["build"],
- name=file_name))
- logging.info(
- "Downloading the file '{0}' to '{1}' ...".format(url, new_name))
-
- status = "failed"
- try:
- response = get(url, stream=True)
- code = response.status_code
-
- if code != codes["OK"]:
- logging.warning(
- "Jenkins: {0}: {1}.".format(code, responses[code]))
- logging.info("Trying to download from Nexus:")
- spec.set_input_state(job, build["build"], "not found")
- if code == codes["not_found"]:
- release = re.search(REGEX_RELEASE, job).group(2)
- nexus_file_name = "{job}{sep}{build}{sep}{name}".\
- format(job=job, sep=SEPARATOR, build=build["build"],
- name=file_name)
- try:
- release = "rls{0}".format(int(release))
- except ValueError:
- pass
- url = "{url}/{release}/{dir}/{file}".\
- format(url=spec.environment["urls"]["URL[NEXUS]"],
- release=release,
- dir=spec.environment["urls"]["DIR[NEXUS]"],
- file=nexus_file_name)
- logging.info("Downloading the file '{0}' to '{1}' ...".
- format(url, new_name))
- response = get(url, stream=True)
- code = response.status_code
- if code != codes["OK"]:
- logging.error(
- "Nexus: {0}: {1}".format(code, responses[code]))
- spec.set_input_state(
- job, build["build"], "not found")
- continue
-
- file_handle = open(new_name, "wb")
- for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
- if chunk:
- file_handle.write(chunk)
- file_handle.close()
-
- if spec.input["file-name"].endswith(".zip"):
- expected_length = None
- try:
- expected_length = int(response.
- headers["Content-Length"])
- logging.debug(" Expected file size: {0}B".
- format(expected_length))
- except KeyError:
- logging.debug(" No information about expected size.")
-
- real_length = getsize(new_name)
- logging.debug(" Downloaded size: {0}B".format(real_length))
-
- if expected_length:
- if real_length == expected_length:
- status = "downloaded"
- logging.info("{0}: {1}".format(code,
- responses[code]))
- else:
- logging.error("The file size differs from the "
- "expected size.")
- else:
- status = "downloaded"
- logging.info("{0}: {1}".format(code, responses[code]))
-
- elif spec.input["file-name"].endswith(".gz"):
- if "docs.fd.io" in url:
- execute_command("gzip --decompress --keep --force {0}".
- format(new_name))
- else:
- rename(new_name, new_name[:-3])
- execute_command("gzip --keep {0}".format(new_name[:-3]))
- new_name = new_name[:-3]
- status = "downloaded"
- logging.info("{0}: {1}".format(code, responses[code]))
-
- except ConnectionError as err:
- logging.error("Not possible to connect to '{0}'.".format(url))
- logging.debug(err)
- except HTTPError as err:
- logging.error("Invalid HTTP response from '{0}'.".format(url))
- logging.debug(err)
- except TooManyRedirects as err:
- logging.error("Request exceeded the configured number "
- "of maximum re-directions.")
- logging.debug(err)
- except Timeout as err:
- logging.error("Request timed out.")
- logging.debug(err)
- except RequestException as err:
- logging.error("Unexpected HTTP request exception.")
- logging.debug(err)
- except (IOError, ValueError, KeyError) as err:
- logging.error("Download failed.")
- logging.debug("Reason: {0}".format(err))
-
- spec.set_input_state(job, build["build"], status)
- spec.set_input_file_name(job, build["build"], new_name)
-
- if status == "failed":
- logging.info("Removing the file '{0}'".format(new_name))
- try:
- remove(new_name)
- except OSError as err:
- logging.warning(str(err))
- spec.set_input_file_name(job, build["build"], None)
-
- unzip_files(spec)
-
-
-def unzip_files(spec):
- """Unzip downloaded zip files
-
- :param spec: Specification.
+ data_file = spec.input["extract"]
+ file_name = build["file-name"]
+ directory = spec.environment["paths"]["DIR[WORKING,DATA]"]
+ tmp_dir = join(directory, str(pid))
+ try:
+ mkdir(tmp_dir)
+ except OSError:
+ pass
+ new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2],
+ SEPARATOR,
+ data_file.split("/")[-1])
+
+ log.append(("INFO", " Unzipping: '{0}' from '{1}'.".
+ format(data_file, file_name)))
+ try:
+ with ZipFile(file_name, 'r') as zip_file:
+ zip_file.extract(data_file, tmp_dir)
+ log.append(("INFO", " Renaming the file '{0}' to '{1}'".
+ format(join(tmp_dir, data_file), new_name)))
+ rename(join(tmp_dir, data_file), new_name)
+ build["file-name"] = new_name
+ return True
+ except (BadZipfile, RuntimeError) as err:
+ log.append(("ERROR", "Failed to unzip the file '{0}': {1}.".
+ format(file_name, str(err))))
+ return False
+ except OSError as err:
+ log.append(("ERROR", "Failed to rename the file '{0}': {1}.".
+ format(data_file, str(err))))
+ return False
+
+
+def download_and_unzip_data_file(spec, job, build, pid, log):
+ """Download and unzip a source file.
+
+ :param spec: Specification read from the specification file.
+ :param job: Name of the Jenkins job.
+ :param build: Information about the build.
+ :param pid: PID of the process executing this method.
+ :param log: List of log messages.
:type spec: Specification
- :raises: PresentationError if the zip file does not exist or it is not a
- zip file.
+ :type job: str
+ :type build: dict
+ :type pid: int
+ :type log: list of tuples (severity, msg)
+ :returns: True if the download was successful, otherwise False.
+ :rtype: bool
"""
- if spec.is_debug:
- data_file = spec.debug["extract"]
+ if job.startswith("csit-"):
+ if spec.input["file-name"].endswith(".zip"):
+ url = spec.environment["urls"]["URL[JENKINS,CSIT]"]
+ elif spec.input["file-name"].endswith(".gz"):
+ url = spec.environment["urls"]["URL[NEXUS,LOG]"]
+ else:
+ log.append(("ERROR", "Not supported file format."))
+ return False
+ elif job.startswith("hc2vpp-"):
+ url = spec.environment["urls"]["URL[JENKINS,HC]"]
else:
- data_file = spec.input["extract"]
-
- for job, builds in spec.builds.items():
- for build in builds:
- if build["status"] == "failed" or build["status"] == "not found":
- continue
+ raise PresentationError("No url defined for the job '{}'.".
+ format(job))
+ file_name = spec.input["file-name"]
+ full_name = spec.input["download-path"]. \
+ format(job=job, build=build["build"], filename=file_name)
+ url = "{0}/{1}".format(url, full_name)
+ new_name = join(spec.environment["paths"]["DIR[WORKING,DATA]"],
+ "{job}{sep}{build}{sep}{name}".
+ format(job=job, sep=SEPARATOR, build=build["build"],
+ name=file_name))
+ # Download the file from the defined source (Jenkins, logs.fd.io):
+ success = _download_file(url, new_name, log)
+
+ if success and new_name.endswith(".zip"):
+ if not is_zipfile(new_name):
+ success = False
+
+ # If not successful, download from docs.fd.io:
+ if not success:
+ log.append(("INFO", " Trying to download from https://docs.fd.io:"))
+ release = re.search(REGEX_RELEASE, job).group(2)
+ for rls in (release, "master"):
+ nexus_file_name = "{job}{sep}{build}{sep}{name}". \
+ format(job=job, sep=SEPARATOR, build=build["build"],
+ name=file_name)
try:
- status = "failed"
- directory = spec.environment["paths"]["DIR[WORKING,DATA]"]
- file_name = join(build["file-name"])
-
- if build["status"] == "downloaded":
- logging.info("Unziping: '{0}' from '{1}'.".
- format(data_file, file_name))
- new_name = "{0}{1}{2}".format(file_name.rsplit('.')[-2],
- SEPARATOR,
- data_file.split("/")[-1])
- try:
- if is_zipfile(file_name):
- with ZipFile(file_name, 'r') as zip_file:
- zip_file.extract(data_file, directory)
- logging.info("Moving {0} to {1} ...".
- format(join(directory, data_file),
- directory))
- move(join(directory, data_file), directory)
- logging.info("Renaming the file '{0}' to '{1}'".
- format(join(directory,
- data_file.split("/")[-1]),
- new_name))
- rename(join(directory, data_file.split("/")[-1]),
- new_name)
- spec.set_input_file_name(job, build["build"],
- new_name)
- status = "unzipped"
- spec.set_input_state(job, build["build"], status)
- except (BadZipfile, RuntimeError) as err:
- logging.error("Failed to unzip the file '{0}': {1}.".
- format(file_name, str(err)))
- except OSError as err:
- logging.error("Failed to rename the file '{0}': {1}.".
- format(data_file, str(err)))
- finally:
- if status == "failed":
- spec.set_input_file_name(job, build["build"], None)
- else:
- raise PresentationError("The file '{0}' does not exist or "
- "it is not a zip file".
- format(file_name))
-
- spec.set_input_state(job, build["build"], status)
-
- except KeyError:
+ rls = "rls{0}".format(int(rls))
+ except ValueError:
pass
+ url = "{url}/{release}/{dir}/{file}". \
+ format(url=spec.environment["urls"]["URL[NEXUS]"],
+ release=rls,
+ dir=spec.environment["urls"]["DIR[NEXUS]"],
+ file=nexus_file_name)
+ success = _download_file(url, new_name, log)
+ if success:
+ break
+
+ if success:
+ build["file-name"] = new_name
+ else:
+ return False
+
+ if spec.input["file-name"].endswith(".gz"):
+ if "docs.fd.io" in url:
+ execute_command("gzip --decompress --keep --force {0}".
+ format(new_name))
+ else:
+ rename(new_name, new_name[:-3])
+ execute_command("gzip --keep {0}".format(new_name[:-3]))
+ build["file-name"] = new_name[:-3]
+
+ if new_name.endswith(".zip"):
+ if is_zipfile(new_name):
+ return _unzip_file(spec, build, pid, log)
+ else:
+ log.append(("ERROR",
+ "Zip file '{0}' is corrupted.".format(new_name)))
+ return False
+ else:
+ return True
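
For orientation: download_and_unzip_data_file() delegates the actual transfer to a
_download_file(url, file_name, log) helper defined earlier in this module and not
shown in this hunk. A minimal sketch of such a helper, assuming it streams the
response with requests and reports progress through the shared log list of
(severity, message) tuples, could look like this (illustrative only, not the
module's actual code):

import requests

def _download_file(url, file_name, log):
    """Stream 'url' into 'file_name'; append progress/errors to 'log'."""
    success = False
    try:
        log.append(("INFO", "    Connecting to '{0}' ...".format(url)))
        response = requests.get(url, stream=True)
        if response.status_code == requests.codes.ok:
            log.append(("INFO", "    Downloading the file '{0}' to '{1}' ...".
                        format(url, file_name)))
            with open(file_name, "wb") as file_handle:
                for chunk in response.iter_content(chunk_size=4096):
                    file_handle.write(chunk)
            success = True
        else:
            log.append(("ERROR", "    HTTP status code {0} for '{1}'.".
                        format(response.status_code, url)))
    except requests.exceptions.RequestException as err:
        log.append(("ERROR", "    HTTP request failed: {0}".format(repr(err))))
    except (IOError, ValueError) as err:
        log.append(("ERROR", "    Download failed: {0}".format(repr(err))))
    return success
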
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 7adc9c085b..beec34c106 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -18,15 +18,20 @@
- provide access to the data.
"""
+import multiprocessing
+import os
import re
import pandas as pd
import logging
-import xml.etree.ElementTree as ET
from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from collections import OrderedDict
from string import replace
+from os import remove
+
+from input_data_files import download_and_unzip_data_file
+from utils import Worker
class ExecutionChecker(ResultVisitor):
@@ -171,14 +176,14 @@ class ExecutionChecker(ResultVisitor):
REGEX_TOLERANCE = re.compile(r'^[\D\d]*LOSS_ACCEPTANCE:\s(\d*\.\d*)\s'
r'[\D\d]*')
- REGEX_VERSION = re.compile(r"(stdout: 'vat# vat# Version:)(\s*)(.*)")
+ REGEX_VERSION = re.compile(r"(return STDOUT Version:\s*)(.*)")
REGEX_TCP = re.compile(r'Total\s(rps|cps|throughput):\s([0-9]*).*$')
REGEX_MRR = re.compile(r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
r'tx\s(\d*),\srx\s(\d*)')
- def __init__(self, **metadata):
+ def __init__(self, metadata):
"""Initialisation.
:param metadata: Key-value pairs to be included in "metadata" part of
@@ -244,14 +249,13 @@ class ExecutionChecker(ResultVisitor):
:returns: Nothing.
"""
- if msg.message.count("stdout: 'vat# vat# Version:"):
+ if msg.message.count("return STDOUT Version:"):
self._version = str(re.search(self.REGEX_VERSION, msg.message).
- group(3))
+ group(2))
self._data["metadata"]["version"] = self._version
+ self._data["metadata"]["generated"] = msg.timestamp
self._msg_type = None
- logging.debug(" VPP version: {0}".format(self._version))
-
def _get_vat_history(self, msg):
"""Called when extraction of VAT command history is required.
@@ -585,7 +589,7 @@ class ExecutionChecker(ResultVisitor):
:type setup_kw: Keyword
:returns: Nothing.
"""
- if setup_kw.name.count("Vpp Show Version Verbose") \
+ if setup_kw.name.count("Show Vpp Version On All Duts") \
and not self._version:
self._msg_type = "setup-version"
setup_kw.messages.visit(self)
@@ -696,7 +700,7 @@ class InputData(object):
self._cfg = spec
# Data store:
- self._input_data = None
+ self._input_data = pd.Series()
@property
def data(self):
@@ -747,76 +751,186 @@ class InputData(object):
return self.data[job][build]["tests"]
@staticmethod
- def _parse_tests(job, build):
+ def _parse_tests(job, build, log):
"""Process data from robot output.xml file and return JSON structured
data.
:param job: The name of job which build output data will be processed.
:param build: The build which output data will be processed.
+ :param log: List of log messages.
:type job: str
:type build: dict
+ :type log: list of tuples (severity, msg)
:returns: JSON data structure.
:rtype: dict
"""
- tree = ET.parse(build["file-name"])
- root = tree.getroot()
- generated = root.attrib["generated"]
+ metadata = {
+ "job": job,
+ "build": build
+ }
with open(build["file-name"], 'r') as data_file:
try:
result = ExecutionResult(data_file)
except errors.DataError as err:
- logging.error("Error occurred while parsing output.xml: {0}".
- format(err))
+ log.append(("ERROR", "Error occurred while parsing output.xml: "
+ "{0}".format(err)))
return None
- checker = ExecutionChecker(job=job, build=build, generated=generated)
+ checker = ExecutionChecker(metadata)
result.visit(checker)
return checker.data
- def read_data(self):
- """Parse input data from input files and store in pandas' Series.
+ def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
+ """Download and parse the input data file.
+
+ :param pid: PID of the process executing this method.
+ :param data_queue: Shared memory between processes. Queue which keeps
+ the result data. This data is then read by the main process and used
+ in further processing.
+ :param job: Name of the Jenkins job which generated the processed input
+ file.
+ :param build: Information about the Jenkins build which generated the
+ processed input file.
+        :param repeat: Repeat the download the specified number of times if it
+            is not successful.
+ :type pid: int
+ :type data_queue: multiprocessing.Manager().Queue()
+ :type job: str
+ :type build: dict
+ :type repeat: int
+ """
+
+ logs = list()
+
+ logging.info(" Processing the job/build: {0}: {1}".
+ format(job, build["build"]))
+
+ logs.append(("INFO", " Processing the job/build: {0}: {1}".
+ format(job, build["build"])))
+
+ state = "failed"
+ success = False
+ data = None
+ do_repeat = repeat
+ while do_repeat:
+ success = download_and_unzip_data_file(self._cfg, job, build, pid,
+ logs)
+ if success:
+ break
+ do_repeat -= 1
+ if not success:
+ logs.append(("ERROR", "It is not possible to download the input "
+ "data file from the job '{job}', build "
+ "'{build}', or it is damaged. Skipped.".
+ format(job=job, build=build["build"])))
+ if success:
+ logs.append(("INFO", " Processing data from the build '{0}' ...".
+ format(build["build"])))
+ data = InputData._parse_tests(job, build, logs)
+ if data is None:
+ logs.append(("ERROR", "Input data file from the job '{job}', "
+ "build '{build}' is damaged. Skipped.".
+ format(job=job, build=build["build"])))
+ else:
+ state = "processed"
+
+ try:
+ remove(build["file-name"])
+ except OSError as err:
+ logs.append(("ERROR", "Cannot remove the file '{0}': {1}".
+ format(build["file-name"], err)))
+ logs.append(("INFO", " Done."))
+
+ result = {
+ "data": data,
+ "state": state,
+ "job": job,
+ "build": build,
+ "logs": logs
+ }
+ data_queue.put(result)
+
+ def download_and_parse_data(self, repeat=1):
+        """Download the input data files, parse the input data and store it in
+        pandas' Series.
+
+        :param repeat: Repeat the download the specified number of times if it
+            is not successful.
+ :type repeat: int
"""
- logging.info("Parsing input files ...")
+ logging.info("Downloading and parsing input files ...")
+
+ work_queue = multiprocessing.JoinableQueue()
+ manager = multiprocessing.Manager()
+ data_queue = manager.Queue()
+ cpus = multiprocessing.cpu_count()
+
+ workers = list()
+ for cpu in range(cpus):
+ worker = Worker(work_queue,
+ data_queue,
+ self._download_and_parse_build)
+ worker.daemon = True
+ worker.start()
+ workers.append(worker)
+ os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+ format(cpu, worker.pid))
- job_data = dict()
for job, builds in self._cfg.builds.items():
- logging.info(" Extracting data from the job '{0}' ...'".
- format(job))
- builds_data = dict()
for build in builds:
- if build["status"] == "failed" \
- or build["status"] == "not found":
- continue
- logging.info(" Extracting data from the build '{0}'".
- format(build["build"]))
- logging.info(" Processing the file '{0}'".
- format(build["file-name"]))
- data = InputData._parse_tests(job, build)
- if data is None:
- logging.error("Input data file from the job '{job}', build "
- "'{build}' is damaged. Skipped.".
- format(job=job, build=build["build"]))
- continue
+ work_queue.put((job, build, repeat))
+
+ work_queue.join()
+
+ logging.info("Done.")
+
+ while not data_queue.empty():
+ result = data_queue.get()
+
+ job = result["job"]
+ build_nr = result["build"]["build"]
+ if result["data"]:
+ data = result["data"]
build_data = pd.Series({
"metadata": pd.Series(data["metadata"].values(),
index=data["metadata"].keys()),
"suites": pd.Series(data["suites"].values(),
index=data["suites"].keys()),
"tests": pd.Series(data["tests"].values(),
- index=data["tests"].keys()),
- })
- builds_data[str(build["build"])] = build_data
- logging.info(" Done.")
+ index=data["tests"].keys())})
- job_data[job] = pd.Series(builds_data.values(),
- index=builds_data.keys())
- logging.info(" Done.")
+ if self._input_data.get(job, None) is None:
+ self._input_data[job] = pd.Series()
+ self._input_data[job][str(build_nr)] = build_data
+
+ self._cfg.set_input_file_name(job, build_nr,
+ result["build"]["file-name"])
+
+ self._cfg.set_input_state(job, build_nr, result["state"])
+
+ for item in result["logs"]:
+ if item[0] == "INFO":
+ logging.info(item[1])
+ elif item[0] == "ERROR":
+ logging.error(item[1])
+ elif item[0] == "DEBUG":
+ logging.debug(item[1])
+ elif item[0] == "CRITICAL":
+ logging.critical(item[1])
+ elif item[0] == "WARNING":
+ logging.warning(item[1])
+
+ del data_queue
+
+ # Terminate all workers
+ for worker in workers:
+ worker.terminate()
+ worker.join()
- self._input_data = pd.Series(job_data.values(), index=job_data.keys())
logging.info("Done.")
@staticmethod
@@ -893,9 +1007,6 @@ class InputData(object):
:rtype pandas.Series
"""
- logging.info(" Creating the data set for the {0} '{1}'.".
- format(element.get("type", ""), element.get("title", "")))
-
try:
if element["filter"] in ("all", "template"):
cond = "True"
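
The parallel download/parse path above builds on the Worker class imported from
utils, which is outside this diff. Judging from how it is constructed
(Worker(work_queue, data_queue, self._download_and_parse_build)), pinned with
taskset, and later terminated, it is presumably a thin multiprocessing.Process
wrapper along these lines (a sketch under that assumption, not the actual utils
code):

import multiprocessing

class Worker(multiprocessing.Process):
    """Sketch: pull work items from a JoinableQueue and run 'func' on them."""

    def __init__(self, work_queue, data_queue, func):
        super(Worker, self).__init__()
        self._work_queue = work_queue   # multiprocessing.JoinableQueue()
        self._data_queue = data_queue   # Manager().Queue() collecting results
        self._func = func               # e.g. InputData._download_and_parse_build

    def run(self):
        # Runs until the parent calls terminate(); each item is the
        # (job, build, repeat) tuple put by download_and_parse_data().
        while True:
            work_item = self._work_queue.get()
            try:
                self._func(self.pid, self._data_queue, *work_item)
            finally:
                self._work_queue.task_done()
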
diff --git a/resources/tools/presentation/pal.py b/resources/tools/presentation/pal.py
index 98642c898c..013c921124 100644
--- a/resources/tools/presentation/pal.py
+++ b/resources/tools/presentation/pal.py
@@ -21,7 +21,6 @@ import logging
from errors import PresentationError
from environment import Environment, clean_environment
from specification_parser import Specification
-from input_data_files import download_data_files, unzip_files
from input_data_parser import InputData
from generator_tables import generate_tables
from generator_plots import generate_plots
@@ -30,8 +29,6 @@ from static_content import prepare_static_content
from generator_report import generate_report
from generator_CPTA import generate_cpta
-from pprint import pprint
-
def parse_args():
"""Parse arguments from cmd line.
@@ -86,21 +83,20 @@ def main():
logging.critical("Finished with error.")
return 1
- ret_code = 0
+ if spec.output["output"] not in ("report", "CPTA"):
+ logging.critical("The output '{0}' is not supported.".
+ format(spec.output["output"]))
+ return 1
+
+ ret_code = 1
try:
env = Environment(spec.environment, args.force)
env.set_environment()
- if spec.is_debug:
- if spec.debug["input-format"] == "zip":
- unzip_files(spec)
- else:
- download_data_files(spec)
-
prepare_static_content(spec)
data = InputData(spec)
- data.read_data()
+ data.download_and_parse_data(repeat=2)
generate_tables(spec, data)
generate_plots(spec, data)
@@ -112,21 +108,16 @@ def main():
elif spec.output["output"] == "CPTA":
sys.stdout.write(generate_cpta(spec, data))
logging.info("Successfully finished.")
- else:
- logging.critical("The output '{0}' is not supported.".
- format(spec.output["output"]))
- ret_code = 1
+ ret_code = 0
except (KeyError, ValueError, PresentationError) as err:
logging.info("Finished with an error.")
logging.critical(str(err))
- ret_code = 1
except Exception as err:
logging.info("Finished with an unexpected error.")
logging.critical(str(err))
- ret_code = 1
finally:
- if spec is not None and not spec.is_debug:
+ if spec is not None:
clean_environment(spec.environment)
return ret_code
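
Two behavioural points in the reworked main() are worth noting: the return code
now starts at 1 and is set to 0 only after the whole pipeline has finished, and
clean_environment() runs unconditionally now that debug mode is gone. The
skeleton of that pattern, with the pipeline body reduced to a placeholder
callable, is roughly the following (an illustration, not pal.py itself):

import logging

from environment import clean_environment

def run_pipeline(spec, pipeline):
    """Illustrative skeleton of pal.main()'s error handling.

    'spec' is the parsed Specification; 'pipeline' stands for the body of
    main() (environment setup, download, parsing, table/plot/report
    generation).
    """
    ret_code = 1                        # pessimistic default
    try:
        pipeline(spec)                  # the actual work
        logging.info("Successfully finished.")
        ret_code = 0                    # flipped only if nothing raised
    except Exception as err:            # pal.py also names KeyError, ValueError
        logging.critical(str(err))      # and PresentationError explicitly
    finally:
        if spec is not None:
            clean_environment(spec.environment)
    return ret_code
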
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index da4443dc30..f3dba402da 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -1,14 +1,5 @@
-
type: "environment"
- configuration:
- # Debug mode:
- # - Skip:
- # - Download of input data files
- # - Do:
- # - Read data from given zip / xml files
- # - Set the configuration as it is done in normal mode
- # If the section "type: debug" is missing, CFG[DEBUG] is set to 0.
- CFG[DEBUG]: 0
paths:
# Top level directories:
@@ -38,9 +29,11 @@
DIR[DTR]: "{DIR[WORKING,SRC]}/detailed_test_results"
DIR[DTR,PERF,DPDK]: "{DIR[DTR]}/dpdk_performance_results"
DIR[DTR,PERF,VPP]: "{DIR[DTR]}/vpp_performance_results"
+ DIR[DTR,MRR,VPP]: "{DIR[DTR]}/vpp_mrr_results"
DIR[DTR,PERF,COT]: "{DIR[DTR]}/cot_performance_results"
DIR[DTR,PERF,HC]: "{DIR[DTR]}/honeycomb_performance_results"
DIR[DTR,FUNC,VPP]: "{DIR[DTR]}/vpp_functional_results"
+ DIR[DTR,FUNC,VPP,CENTOS]: "{DIR[DTR]}/vpp_functional_results_centos"
DIR[DTR,FUNC,HC]: "{DIR[DTR]}/honeycomb_functional_results"
DIR[DTR,FUNC,NSHSFC]: "{DIR[DTR]}/nshsfc_functional_results"
DIR[DTR,PERF,VPP,IMPRV]: "{DIR[WORKING,SRC]}/vpp_performance_tests/performance_improvements"
@@ -48,7 +41,9 @@
# Detailed test configurations
DIR[DTC]: "{DIR[WORKING,SRC]}/test_configuration"
DIR[DTC,PERF,VPP]: "{DIR[DTC]}/vpp_performance_configuration"
+ DIR[DTC,MRR,VPP]: "{DIR[DTC]}/vpp_mrr_configuration"
DIR[DTC,FUNC,VPP]: "{DIR[DTC]}/vpp_functional_configuration"
+ DIR[DTC,FUNC,VPP,CENTOS]: "{DIR[DTC]}/vpp_functional_configuration_centos"
# Detailed tests operational data
DIR[DTO]: "{DIR[WORKING,SRC]}/test_operational_data"
@@ -79,6 +74,7 @@
# List the directories which are deleted while cleaning the environment.
# All directories MUST be defined in "paths" section.
#- "DIR[BUILD,HTML]"
+ - "DIR[WORKING,DATA]"
build-dirs:
# List the directories where the results (build) is stored.
@@ -91,49 +87,19 @@
data-sets:
plot-vpp-http-server-performance:
csit-vpp-perf-1801-all:
- - 157
- - 158
- - 159
- - 160
- - 161
- - 164
- - 165
- - 166
- - 168
- - 169
- - 170
-# TODO: Add the data sources
-# vpp-meltdown-impact:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# csit-vpp-perf-1710-all:
-# - 11l
-# - 12
-# - 13
-# TODO: Add the data sources
-# vpp-spectre-impact:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
+ - 534 # wrk
+ vpp-performance-changes:
# csit-vpp-perf-1710-all:
# - 11
# - 12
# - 13
- vpp-performance-changes:
- csit-vpp-perf-1710-all:
- - 11
- - 12
- - 13
- - 14
- - 15
- - 16
- - 17
- - 18
- - 19
- - 20
+# - 14
+# - 15
+# - 16
+# - 17
+# - 18
+# - 19
+# - 20
csit-vpp-perf-1801-all:
- 124 # sel
- 127 # sel
@@ -147,133 +113,52 @@
- 163 # sel
- 167 # sel
- 172 # sel acl only
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
plot-throughput-speedup-analysis:
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- - 124 # sel
- - 127 # sel
- - 128 # sel
- - 141 # sel
- - 142 # sel
- - 143 # sel
- - 145 # sel
- - 146 # sel
- - 162 # sel
- - 163 # sel
- - 167 # sel
- - 172 # sel acl only
-# performance-improvements:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 21
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 20
-# csit-vpp-perf-1801-all:
-# - 124
-# - 127
-# - 128
-# csit-ligato-perf-1710-all:
-# - 5
-# - 7
-# - 8
-# - 9
-# - 10
-# - 11
-# - 12
-# - 13
-# - 16
-# - 17
-# csit-ligato-perf-1801-all:
-# - 16 # sel
-# - 17 # sel
-# - 18 # sel
-# - 19 # sel
-# - 20 # sel
-# - 21 # sel
-# - 22 # sel
-# - 23 # sel
-# - 24 # sel
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
+ plot-ligato-throughput-speedup-analysis:
+ csit-ligato-perf-1801-all:
+ - 27
vpp-perf-results:
csit-vpp-perf-1801-all:
- - 122
- - 126
- - 129
- - 140
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
vpp-func-results:
csit-vpp-functional-1801-ubuntu1604-virl:
- - "lastSuccessfulBuild"
+ - 454
+ vpp-func-results-centos:
+ csit-vpp-functional-1801-centos7-virl:
+ - 454
+ vpp-mrr-results:
+ csit-vpp-perf-check-1801:
+ - 18 # mrr - full
ligato-perf-results:
csit-ligato-perf-1801-all:
- - 25 # full
- dpdk-perf-results:
- csit-dpdk-perf-1801-all:
- - 12
+ - 27 # full
hc-func-results:
csit-hc2vpp-verify-func-1801-ubuntu1604:
- - "lastSuccessfulBuild"
+ - 14
nsh-func-results:
csit-nsh_sfc-verify-func-1801-ubuntu1604-virl:
- - 1
+ - 3
plot-vpp-throughput-latency:
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- - 124 # sel
- - 127 # sel
- - 128 # sel
- - 141 # sel
- - 142 # sel
- - 143 # sel
- - 145 # sel
- - 146 # sel
- - 162 # sel
- - 163 # sel
- - 167 # sel
- - 172 # sel acl only
- plot-dpdk-throughput-latency:
- csit-dpdk-perf-1801-all:
- - 1
- - 3
- - 4
- - 5
- - 6
- - 7
- - 8
- - 10
- - 12
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
plot-ligato-throughput-latency:
csit-ligato-perf-1801-all:
- - 16 # sel
- - 17 # sel
- - 18 # sel
- - 19 # sel
- - 20 # sel
- - 21 # sel
- - 22 # sel
- - 23 # sel
- - 24 # sel
+ - 27 # full
plot-layouts:
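
The named data sets above are what tables and plots later refer to through their
"data:" key; each name presumably resolves to a mapping of Jenkins job names to
build numbers once the specification is parsed. For example (an illustration of
the assumed expansion, not generated output):

# Assumed shape of two resolved data sets; keys are Jenkins jobs, values are
# the build numbers listed above.
DATA_SETS = {
    "vpp-perf-results": {
        "csit-vpp-perf-1801-all": [535, 539, 533, 540],   # 18.01.2 full runs
    },
    "plot-ligato-throughput-speedup-analysis": {
        "csit-ligato-perf-1801-all": [27],
    },
}

# A table or plot element then selects its input roughly as:
#   data: "vpp-perf-results"
#   filter: "all"
# i.e. all tests from those builds, before any tag filter is applied.
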
@@ -492,27 +377,6 @@
height: 1000
-
- type: "debug"
- general:
- input-format: "xml" # zip or xml
- extract: "robot-plugin/output.xml" # Only for zip
- builds:
- # The files must be in the directory DIR[WORKING,DATA]
- csit-vpp-perf-1801-all:
- -
- build: 1
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 2
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 3
- file: "{DIR[WORKING,DATA]}/output.xml"
- -
- build: 4
- file: "{DIR[WORKING,DATA]}/output.xml"
-
--
type: "static"
src-path: "{DIR[RST]}"
dst-path: "{DIR[WORKING,SRC]}"
@@ -525,17 +389,6 @@
download-path: "{job}/{build}/robot/report/*zip*/{filename}"
extract: "robot-plugin/output.xml"
builds:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# - 14
-# - 15
-# - 16
-# - 17
-# - 18
-# - 19
-# - 21
csit-vpp-perf-1710-all:
- 11
- 12
@@ -548,10 +401,6 @@
- 19
- 20
csit-vpp-perf-1801-all:
- - 122 # full
- - 126 # full
- - 129 # full
- - 140 # full
- 124 # sel
- 127 # sel
- 128 # sel
@@ -560,21 +409,17 @@
- 143 # sel
- 145 # sel
- 146 # sel
- - 157 # wrk
- - 158 # wrk
- - 159 # wrk
- - 160 # wrk
- - 161 # wrk
- 162 # sel
- 163 # sel
- - 164 # wrk
- - 165 # wrk
- - 166 # wrk
- 167 # sel
- - 168 # wrk
- - 169 # wrk
- - 170 # wrk
- 172 # sel acl only
+ - 535 # 18.01.2 full
+ - 539 # 18.01.2 full
+ - 533 # 18.01.2 full
+ - 540 # 18.01.2 full
+ - 534 # 18.01.2 wrk
+ csit-vpp-perf-check-1801:
+ - 18 # mrr full
csit-ligato-perf-1710-all:
- 5
- 7
@@ -586,44 +431,16 @@
- 13
- 16
- 17
- csit-dpdk-perf-1801-all:
- - 1
- - 4
- - 5
- - 7
- - 8
- - 10
- - 12
- - 16
- - 17
csit-ligato-perf-1801-all:
- - 16 # sel
- - 17 # sel
- - 18 # sel
- - 19 # sel
- - 20 # sel
- - 21 # sel
- - 22 # sel
- - 23 # sel
- - 24 # sel
- - 25 # full
- csit-dpdk-perf-1801-all:
- - 1
- - 3
- - 4
- - 5
- - 6
- - 7
- - 8
- - 9
- - 10
- - 12
+ - 27 # full
csit-vpp-functional-1801-ubuntu1604-virl:
- - lastSuccessfulBuild
+ - 454
+ csit-vpp-functional-1801-centos7-virl:
+ - 454
csit-nsh_sfc-verify-func-1801-ubuntu1604-virl:
- - 1
+ - 3
csit-hc2vpp-verify-func-1801-ubuntu1604:
- - lastSuccessfulBuild
+ - 14
-
type: "output"
@@ -638,74 +455,6 @@
### T A B L E S ###
################################################################################
-#-
-# type: "table"
-# title: "Performance Impact of Meltdown Patches"
-# algorithm: "table_performance_comparison"
-# output-file-ext: ".csv"
-## TODO: specify dir
-# output-file: "{DIR[STATIC,VPP]}/meltdown-impact"
-# reference:
-# title: "No Meltdown"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# compare:
-# title: "Meltdown Patches Applied"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# data:
-# "vpp-meltdown-impact"
-# filter: "all"
-# parameters:
-# - "name"
-# - "parent"
-# - "throughput"
-# # Number of the best and the worst tests presented in the table. Use 0 (zero)
-# # to present all tests.
-# nr-of-tests-shown: 20
-#
-#-
-# type: "table"
-# title: "Performance Impact of Spectre Patches"
-# algorithm: "table_performance_comparison"
-# output-file-ext: ".csv"
-## TODO: specify dir
-# output-file: "{DIR[STATIC,VPP]}/meltdown-spectre-impact"
-# reference:
-# title: "No Spectre"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1707-all:
-# - 9
-# - 10
-# - 13
-# compare:
-# title: "Spectre Patches Applied"
-## TODO: specify data sources
-# data:
-# csit-vpp-perf-1710-all:
-# - 11
-# - 12
-# - 13
-# data:
-# "vpp-spectre-impact"
-# filter: "all"
-# parameters:
-# - "name"
-# - "parent"
-# - "throughput"
-# # Number of the best and the worst tests presented in the table. Use 0 (zero)
-# # to present all tests.
-# nr-of-tests-shown: 20
-
-
type: "table"
title: "VPP Performance Changes"
@@ -713,22 +462,7 @@
output-file-ext: ".csv"
output-file: "{DIR[STATIC,VPP]}/performance-changes"
reference:
- title: "Release 1710"
- data:
- csit-vpp-perf-1710-all:
- - 11
- - 12
- - 13
- - 14
- - 15
- - 16
- - 17
- - 18
- - 19
- - 20
- compare:
- title: "Release 1801"
-# TODO: specify data sources
+ title: "Release 18.01"
data:
csit-vpp-perf-1801-all:
- 124 # sel
@@ -743,6 +477,25 @@
- 163 # sel
- 167 # sel
- 172 # sel acl only
+# csit-vpp-perf-1710-all:
+# - 11
+# - 12
+# - 13
+# - 14
+# - 15
+# - 16
+# - 17
+# - 18
+# - 19
+# - 20
+ compare:
+ title: "Release 18.01.2"
+ data:
+ csit-vpp-perf-1801-all:
+ - 535 # full
+ - 539 # full
+ - 533 # full
+ - 540 # full
data: "vpp-performance-changes"
filter: "all"
parameters:
@@ -754,48 +507,6 @@
nr-of-tests-shown: 20
outlier-const: 1.5
-#-
-# type: "table"
-# title: "Performance improvements"
-# algorithm: "table_performance_improvements"
-# template: "{DIR[DTR,PERF,VPP,IMPRV]}/tmpl_performance_improvements.csv"
-# output-file-ext: ".csv"
-# output-file: "{DIR[DTR,PERF,VPP,IMPRV]}/performance_improvements"
-# columns:
-# -
-# title: "Test Name"
-# data: "template 1"
-# -
-# title: "16.09 mean [Mpps]"
-# data: "template 2"
-# -
-# title: "17.01 mean [Mpps]"
-# data: "template 3"
-# -
-# title: "17.04 mean [Mpps]"
-# data: "template 4"
-# -
-# title: "17.07 mean [Mpps]"
-# data: "data csit-vpp-perf-1707-all mean"
-# -
-# title: "17.10 mean [Mpps]"
-# data: "data csit-vpp-perf-1710-all csit-ligato-perf-1710-all mean"
-# -
-# title: "18.01 mean [Mpps]"
-# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all mean"
-# -
-# title: "18.01 stdev [Mpps]"
-# data: "data csit-vpp-perf-1801-all csit-ligato-perf-1801-all stdev"
-# -
-# title: "17.10 to 18.01 change [%]"
-# data: "operation relative_change 5 6"
-# rows: "generated"
-# data:
-# "performance-improvements"
-# filter: "template"
-# parameters:
-# - "throughput"
-
-
type: "table"
title: "Detailed Test Results - VPP Performance Results"
@@ -868,6 +579,54 @@
-
type: "table"
+ title: "Detailed Test Results - VPP MRR Results"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "Documentation"
+ data: "data doc"
+ -
+ title: "Status"
+ data: "data msg"
+ rows: "generated"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "parent"
+ - "doc"
+ - "msg"
+
+-
+ type: "table"
+ title: "Test configuration - VPP MRR Test Configs"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_test_configuration"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case"
+ data: "data vat-history"
+ rows: "generated"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "parent"
+ - "name"
+ - "vat-history"
+
+-
+ type: "table"
title: "Detailed Test Results - VPP Functional Results"
algorithm: "table_details"
output-file-ext: ".csv"
@@ -894,6 +653,32 @@
-
type: "table"
+ title: "Detailed Test Results - VPP Functional Results - CentOS"
+ algorithm: "table_details"
+ output-file-ext: ".csv"
+ output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos"
+ columns:
+ -
+ title: "Name"
+ data: "data name"
+ -
+ title: "Documentation"
+ data: "data doc"
+ -
+ title: "Status"
+ data: "data status"
+ rows: "generated"
+ data:
+ "vpp-func-results-centos"
+ filter: "all"
+ parameters:
+ - "name"
+ - "parent"
+ - "doc"
+ - "status"
+
+-
+ type: "table"
title: "Test configuration - VPP Functional Test Configs"
algorithm: "table_details"
output-file-ext: ".csv"
@@ -916,36 +701,32 @@
-
type: "table"
- title: "Detailed Test Results - Container Orchestrated Topologies Performance Results"
+ title: "Test configuration - VPP Functional Test Configs - CentOS"
algorithm: "table_details"
output-file-ext: ".csv"
- output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
+ output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos"
columns:
-
title: "Name"
data: "data name"
-
- title: "Documentation"
- data: "data doc"
- -
- title: "Status"
- data: "data msg"
+ title: "VPP API Test (VAT) Commands History - Commands Used Per Test Case"
+ data: "data vat-history"
rows: "generated"
data:
- "ligato-perf-results"
+ "vpp-func-results-centos"
filter: "all"
parameters:
- - "name"
- "parent"
- - "doc"
- - "msg"
+ - "name"
+ - "vat-history"
-
type: "table"
- title: "Detailed Test Results - DPDK Performance Results"
+ title: "Detailed Test Results - Container Orchestrated Topologies Performance Results"
algorithm: "table_details"
output-file-ext: ".csv"
- output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results"
+ output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
columns:
-
title: "Name"
@@ -958,7 +739,7 @@
data: "data msg"
rows: "generated"
data:
- "dpdk-perf-results"
+ "ligato-perf-results"
filter: "all"
parameters:
- "name"
@@ -1081,6 +862,44 @@
-
type: "file"
+ title: "VPP MRR Results"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTR,MRR,VPP]}/vpp_mrr_results"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTR,MRR,VPP]}"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ - "parent"
+ data-start-level: 2 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
+ title: "VPP MRR Configuration"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTC,MRR,VPP]}/vpp_mrr_configuration"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTC,MRR,VPP]}"
+ data:
+ "vpp-mrr-results"
+ filter: "'MRR'"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ - "parent"
+ data-start-level: 2 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
title: "VPP Functional Results"
algorithm: "file_test_results"
output-file-ext: ".rst"
@@ -1099,6 +918,24 @@
-
type: "file"
+ title: "VPP Functional Results - CentOS"
+ algorithm: "file_test_results"
+ output-file-ext: ".rst"
+ output-file: "{DIR[DTR,FUNC,VPP,CENTOS]}/vpp_functional_results_centos"
+ file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
+ dir-tables: "{DIR[DTR,FUNC,VPP,CENTOS]}"
+ data:
+ "vpp-func-results-centos"
+ filter: "all"
+ parameters:
+ - "name"
+ - "doc"
+ - "level"
+ data-start-level: 3 # 0, 1, 2, ...
+ chapters-start-level: 2 # 0, 1, 2, ...
+
+-
+ type: "file"
title: "VPP Functional Configuration"
algorithm: "file_test_results"
output-file-ext: ".rst"
@@ -1117,39 +954,37 @@
-
type: "file"
- title: "Container Orchestrated Performance Results"
+ title: "VPP Functional Configuration - CentOS"
algorithm: "file_test_results"
output-file-ext: ".rst"
- output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
+ output-file: "{DIR[DTC,FUNC,VPP,CENTOS]}/vpp_functional_configuration_centos"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
- dir-tables: "{DIR[DTR,PERF,COT]}"
+ dir-tables: "{DIR[DTC,FUNC,VPP,CENTOS]}"
data:
- "ligato-perf-results"
+ "vpp-func-results-centos"
filter: "all"
parameters:
- "name"
- "doc"
- "level"
- data-start-level: 2 # 0, 1, 2, ...
+ data-start-level: 3 # 0, 1, 2, ...
chapters-start-level: 2 # 0, 1, 2, ...
-
type: "file"
- title: "DPDK Performance Results"
+ title: "Container Orchestrated Performance Results"
algorithm: "file_test_results"
output-file-ext: ".rst"
- output-file: "{DIR[DTR,PERF,DPDK]}/dpdk_performance_results"
+ output-file: "{DIR[DTR,PERF,COT]}/cot_performance_results"
file-header: "\n.. |br| raw:: html\n\n <br />\n\n\n.. |prein| raw:: html\n\n <pre>\n\n\n.. |preout| raw:: html\n\n </pre>\n\n"
- dir-tables: "{DIR[DTR,PERF,DPDK]}"
+ dir-tables: "{DIR[DTR,PERF,COT]}"
data:
- "dpdk-perf-results"
+ "ligato-perf-results"
filter: "all"
parameters:
- "name"
- "doc"
- "level"
- chapters:
- - "suites"
data-start-level: 2 # 0, 1, 2, ...
chapters-start-level: 2 # 0, 1, 2, ...
@@ -1415,6 +1250,452 @@
layout:
"plot-throughput-speedup-analysis"
+# SRv6 - 10ge2p1x520 - NDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6-ip6(base|scale)*ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'NDRDISC' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6-ip6(base|scale)*ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# SRv6 - 10ge2p1x520 - PDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6-ip6(base|scale)*pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-srv6-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6-ip6(base|scale)*pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP4_overlay - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP4_overlay - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-ethip4-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST' and not 'IPSECHW'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-ethip4[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP6_overlay - NDR
+-
+ type: "plot"
+ title: "TSA: 78B-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# IP6_overlay - PDR
+-
+ type: "plot"
+ title: "TSA: 78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-78B-ethip6-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '78B' and 'ENCAP' and 'PDRDISC' and not 'NDRDISC' and ('VXLAN' or 'VXLANGPE' or 'LISP' or 'LISPGPE' or 'GRE') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "78B-*-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# VM VHOST - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# VM VHOST - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel1-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel1-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*vhost.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-vhost-sel2-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and 'PDRDISC' and not 'NDRDISC' and 'VHOST' and not 'VXLAN' and not 'IP4FWD' and not 'DOT1Q' and not '2VM'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*vhost.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# CRYPTO - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*ipsec.*-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*ipsec.*-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# CRYPTO - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-.*ipsec.*-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/40ge2p1xl710-64B-ipsechw-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-XL710' and '64B' and not 'VHOST' and 'IP4FWD' and 'PDRDISC' and not 'NDRDISC' and 'IPSECHW' and ('IPSECTRAN' or 'IPSECTUN')"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-.*ipsec.*-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container memif - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-ndrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container memif - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-memif-tsa-pdrdisc"
+ data:
+ "plot-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container orchestrated - NDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-ndrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-ndrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+# Container orchestrated - PDR
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x520-64B-container-orchestrated-tsa-pdrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X520-DA2' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
+-
+ type: "plot"
+ title: "TSA: 64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ algorithm: "plot_throughput_speedup_analysis"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/10ge2p1x710-64B-container-orchestrated-tsa-pdrdisc"
+ data:
+ "plot-ligato-throughput-speedup-analysis"
+ filter: "'NIC_Intel-X710' and '64B' and ('BASE' or 'SCALE') and 'PDRDISC' and not 'NDRDISC' and 'MEMIF' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
+ parameters:
+ - "throughput"
+ - "parent"
+ - "tags"
+ layout:
+ title: "64B-*-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
+ layout:
+ "plot-throughput-speedup-analysis"
+
# Plot packets per second
# VPP L2 sel1
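
Each plot's "filter:" string is a boolean expression over test tags (compare the
cond = "True" shortcut for the "all"/"template" filters in input_data_parser.py
above). Assuming each quoted token is rewritten into a membership test against
the test's tag set and the expression is then evaluated, a filter such as the
SRv6 one selects tests roughly like this (a sketch of the assumed mechanism, not
the parser's code):

import re

def matches(filter_expr, tags):
    """Return True if a test's tag set satisfies the filter expression."""
    # "'78B' and 'SRv6'"  ->  "('78B' in tags) and ('SRv6' in tags)"
    cond = re.sub(r"'([^']+)'", r"('\1' in tags)", filter_expr)
    return eval(cond, {"__builtins__": None}, {"tags": tags})

tags = {"NIC_Intel-X520-DA2", "78B", "FEATURE", "NDRDISC", "IP6FWD", "SRv6"}
print(matches("'78B' and 'FEATURE' and 'SRv6' and not 'VHOST'", tags))  # True
print(matches("'64B' and 'PDRDISC' and not 'NDRDISC'", tags))           # False
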
@@ -1683,7 +1964,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1704,7 +1985,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1725,7 +2006,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-pdrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1746,7 +2027,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-pdrdisc"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "throughput"
- "parent"
@@ -1759,6 +2040,91 @@
layout:
"plot-throughput"
+# VPP SRv6
+-
+ type: "plot"
+ title: "VPP Performance 78B-1t1c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-1t1c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-2t2c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-2t2c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-1t1c-ethip6*srv6*pdrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-pdrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-1t1c-ethip6*srv6*pdrdisc"
+ layout:
+ "plot-throughput"
+
+-
+ type: "plot"
+ title: "VPP Performance 78B-2t2c-ethip6*srv6*pdrdisc"
+ algorithm: "plot_performance_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-pdrdisc"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "throughput"
+ - "parent"
+ traces:
+ hoverinfo: "x+y"
+ boxpoints: "outliers"
+ whiskerwidth: 0
+ layout:
+ title: "78B-2t2c-ethip6*srv6*pdrdisc"
+ layout:
+ "plot-throughput"
+
# VPP IP4_overlay
-
type: "plot"
@@ -1863,45 +2229,8 @@
whiskerwidth: 0
layout:
title: "78B-1t1c-ethip6[a-z0-9]+-[a-z0-9]*-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 6000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -1963,45 +2292,8 @@
whiskerwidth: 0
layout:
title: "78B-2t2c-ethip6[a-z0-9]+-[a-z0-9]*-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [4000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
# VPP VM VHOST
-
@@ -2022,45 +2314,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-.*vhost.*-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 3500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -2295,323 +2550,6 @@
layout:
"plot-throughput"
-# DPDK
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [2000000, 12000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-pdrdisc"
- layout:
- "plot-throughput"
-
--
- type: "plot"
- title: "DPDK Performance 64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [20000000, 30000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
--
- type: "plot"
- title: "DPDK Performance 64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc"
- algorithm: "plot_performance_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-pdrdisc"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'PDRDISC' and not 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "throughput"
- - "parent"
- traces:
- hoverinfo: "x+y"
- boxpoints: "outliers"
- whiskerwidth: 0
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [20000000, 30000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
-
# Plot latency
# VPP L2 sel1
@@ -2740,7 +2678,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-ip6-ndrdisc-lat50"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '1T1C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "latency"
- "parent"
@@ -2759,7 +2697,7 @@
output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-ip6-ndrdisc-lat50"
data:
"plot-vpp-throughput-latency"
- filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST'"
+ filter: "'78B' and ('BASE' or 'SCALE' or 'FEATURE') and 'NDRDISC' and '2T2C' and 'IP6FWD' and not 'IPSEC' and not 'VHOST' and not 'SRv6'"
parameters:
- "latency"
- "parent"
@@ -2770,6 +2708,45 @@
layout:
"plot-latency"
+# VPP SRv6
+-
+ type: "plot"
+ title: "VPP Latency 78B-1t1c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_latency_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-1t1c-ethip6-srv6-ndrdisc-lat50"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '1T1C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "latency"
+ - "parent"
+ traces:
+ boxmean: False
+ layout:
+ title: "78B-1t1c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-latency"
+
+-
+ type: "plot"
+ title: "VPP Latency 78B-2t2c-ethip6*srv6*ndrdisc"
+ algorithm: "plot_latency_box"
+ output-file-type: ".html"
+ output-file: "{DIR[STATIC,VPP]}/78B-2t2c-ethip6-srv6-ndrdisc-lat50"
+ data:
+ "plot-vpp-throughput-latency"
+ filter: "'78B' and 'FEATURE' and 'NDRDISC' and '2T2C' and 'IP6FWD' and 'SRv6'"
+ parameters:
+ - "latency"
+ - "parent"
+ traces:
+ boxmean: False
+ layout:
+ title: "78B-2t2c-ethip6*srv6*ndrdisc"
+ layout:
+ "plot-latency"
+
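Note on the filter strings used by these plot elements: they read as plain conjunctions of test tags, so an element is selected only when every listed tag is present on the test (and none of the negated ones, where "not" is used). Purely as an illustration of what the SRv6 filter above selects, and not the parser's actual evaluation mechanism, a hypothetical tag set can be checked like this:

# Illustration only (hypothetical tag set); this is not how the
# presentation tool evaluates filters internally.
tags = {"78B", "FEATURE", "NDRDISC", "1T1C", "IP6FWD", "SRv6"}
selected = all(tag in tags
               for tag in ("78B", "FEATURE", "NDRDISC", "1T1C", "IP6FWD", "SRv6"))
print(selected)   # True -> a test with these tags would appear in the plot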
# VPP IP4_overlay
-
type: "plot"
@@ -2965,83 +2942,6 @@
layout:
"plot-latency"
-# DPDK
--
- type: "plot"
- title: "DPDK Latency 64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-l2-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-l2-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and ('L2BDMACSTAT' or 'L2BDMACLRN' or 'L2XCFWD') and not 'VHOST'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-1t1c-ipv4-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '1T1C' and 'IP4FWD'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-1t1c-ethip4-ip4base-l3fwd-ndrdisc"
- layout:
- "plot-latency"
-
--
- type: "plot"
- title: "DPDK Latency 64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- algorithm: "plot_latency_box"
- output-file-type: ".html"
- output-file: "{DIR[STATIC,DPDK]}/64B-2t2c-ipv4-ndrdisc-lat50"
- data:
- "plot-dpdk-throughput-latency"
- filter: "'64B' and 'BASE' and 'NDRDISC' and '2T2C' and 'IP4FWD'"
- parameters:
- - "latency"
- - "parent"
- traces:
- boxmean: False
- layout:
- title: "64B-2t2c-ethip4-ip4base-l3fwd-ndrdisc"
- layout:
- "plot-latency"
-
# Ligato - Throughput
# Container memif
@@ -3063,45 +2963,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 4500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3121,45 +2984,8 @@
whiskerwidth: 0
layout:
title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-ndrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 8000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3179,45 +3005,8 @@
whiskerwidth: 0
layout:
title: "64B-1t1c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 4500000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
-
type: "plot"
@@ -3237,45 +3026,8 @@
whiskerwidth: 0
layout:
title: "64B-2t2c-(eth|dot1q|dot1ad)-(l2xcbase|l2bdbasemaclrn)-memif-pdrdisc"
- xaxis:
- autorange: True
- autotick: False
- fixedrange: False
- gridcolor: "rgb(238, 238, 238)"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- tickmode: "linear"
- title: "Indexed Test Cases"
- zeroline: False
- yaxis:
- gridcolor: "rgb(238, 238, 238)'"
- hoverformat: ".4s"
- linecolor: "rgb(238, 238, 238)"
- linewidth: 1
- range: [0, 8000000]
- showgrid: True
- showline: True
- showticklabels: True
- tickcolor: "rgb(238, 238, 238)"
- title: "Packets Per Second [pps]"
- zeroline: False
- boxmode: "group"
- boxgroupgap: 0.5
- autosize: False
- margin:
- t: 50
- b: 20
- l: 50
- r: 20
- showlegend: True
- legend:
- orientation: "h"
- width: 700
- height: 1000
+ layout:
+ "plot-throughput"
# Container orchestrated
-
diff --git a/resources/tools/presentation/specification_parser.py b/resources/tools/presentation/specification_parser.py
index 207507e3b6..ebd84530a3 100644
--- a/resources/tools/presentation/specification_parser.py
+++ b/resources/tools/presentation/specification_parser.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -49,7 +49,6 @@ class Specification(object):
self._specification = {"environment": dict(),
"configuration": dict(),
- "debug": dict(),
"static": dict(),
"input": dict(),
"output": dict(),
@@ -95,15 +94,6 @@ class Specification(object):
return self._specification["static"]
@property
- def debug(self):
- """Getter - debug
-
- :returns: Debug specification
- :rtype: dict
- """
- return self._specification["debug"]
-
- @property
def is_debug(self):
"""Getter - debug mode
@@ -425,43 +415,6 @@ class Specification(object):
self.configuration["data-sets"][set_name][job] = builds
logging.info("Done.")
- def _parse_debug(self):
- """Parse debug specification in the specification YAML file.
- """
-
- if int(self.environment["configuration"]["CFG[DEBUG]"]) != 1:
- return None
-
- logging.info("Parsing specification file: debug ...")
-
- idx = self._get_type_index("debug")
- if idx is None:
- self.environment["configuration"]["CFG[DEBUG]"] = 0
- return None
-
- try:
- for key, value in self._cfg_yaml[idx]["general"].items():
- self._specification["debug"][key] = value
-
- self._specification["input"]["builds"] = dict()
- for job, builds in self._cfg_yaml[idx]["builds"].items():
- if builds:
- self._specification["input"]["builds"][job] = list()
- for build in builds:
- self._specification["input"]["builds"][job].\
- append({"build": build["build"],
- "status": "downloaded",
- "file-name": self._replace_tags(
- build["file"],
- self.environment["paths"])})
- else:
- logging.warning("No build is defined for the job '{}'. "
- "Trying to continue without it.".
- format(job))
-
- except KeyError:
- raise PresentationError("No data to process.")
-
def _parse_input(self):
"""Parse input specification in the specification YAML file.
@@ -561,6 +514,13 @@ class Specification(object):
except KeyError:
pass
+ try:
+ element["input-file"] = self._replace_tags(
+ element["input-file"],
+ self._specification["environment"]["paths"])
+ except KeyError:
+ pass
+
# add data sets to the elements:
if isinstance(element.get("data", None), str):
data_set = element["data"]
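For context, the new "input-file" handling above has the same effect as substituting every "{TAG}" placeholder in the string from the environment paths dictionary. The helper below is only a hypothetical stand-in for the parser's own _replace_tags(), shown to make the substitution concrete; the path value is illustrative:

def replace_tags(text, tags):
    # Hypothetical stand-in: substitute each "{TAG}" occurrence from `tags`.
    for tag, value in tags.items():
        text = text.replace("{" + tag + "}", str(value))
    return text

paths = {"DIR[WORKING,DATA]": "_tmp/data"}   # illustrative value only
print(replace_tags("{DIR[WORKING,DATA]}/output.xml", paths))
# -> _tmp/data/output.xml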
@@ -657,9 +617,7 @@ class Specification(object):
self._parse_env()
self._parse_configuration()
- self._parse_debug()
- if not self.debug:
- self._parse_input()
+ self._parse_input()
self._parse_output()
self._parse_static()
self._parse_elements()
diff --git a/resources/tools/presentation/utils.py b/resources/tools/presentation/utils.py
index 8365bfad5c..0a9d985a88 100644
--- a/resources/tools/presentation/utils.py
+++ b/resources/tools/presentation/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -14,6 +14,7 @@
"""General purpose utilities.
"""
+import multiprocessing
import subprocess
import numpy as np
import pandas as pd
@@ -21,7 +22,7 @@ import logging
from os import walk, makedirs, environ
from os.path import join, isdir
-from shutil import copy, Error
+from shutil import move, Error
from math import sqrt
from errors import PresentationError
@@ -68,58 +69,69 @@ def relative_change(nr1, nr2):
return float(((nr2 - nr1) / nr1) * 100)
-def remove_outliers(input_data, outlier_const):
- """
+def remove_outliers(input_list, outlier_const=1.5, window=14):
+ """Return list with outliers removed, using split_outliers.
- :param input_data: Data from which the outliers will be removed.
+ :param input_list: Data from which the outliers will be removed.
:param outlier_const: Outlier constant.
- :type input_data: list
+ :param window: How many preceding values to take into account.
+ :type input_list: list of floats
:type outlier_const: float
+ :type window: int
:returns: The input list without outliers.
- :rtype: list
+ :rtype: list of floats
"""
- data = np.array(input_data)
+ data = np.array(input_list)
upper_quartile = np.percentile(data, 75)
lower_quartile = np.percentile(data, 25)
iqr = (upper_quartile - lower_quartile) * outlier_const
quartile_set = (lower_quartile - iqr, upper_quartile + iqr)
result_lst = list()
- for y in data.tolist():
+ for y in input_list:
if quartile_set[0] <= y <= quartile_set[1]:
result_lst.append(y)
return result_lst
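For orientation, the fence applied above is the classic interquartile-range rule: samples outside [Q1 - c*IQR, Q3 + c*IQR] are dropped, with c = outlier_const (1.5 by default). A self-contained sketch on hypothetical throughput samples, for illustration only and not part of the patch:

import numpy as np

samples = [9.9e6, 10.1e6, 10.0e6, 9.8e6, 3.2e6, 10.2e6]   # hypothetical pps values
data = np.array(samples)
q1, q3 = np.percentile(data, 25), np.percentile(data, 75)
fence = (q3 - q1) * 1.5
kept = [y for y in samples if q1 - fence <= y <= q3 + fence]
# kept drops the 3.2e6 sample, which lies below the lower fence
print(kept)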
-def find_outliers(input_data, outlier_const=1.5):
+def split_outliers(input_series, outlier_const=1.5, window=14):
"""Go through the input data and generate two pandas series:
- - input data without outliers
+ - input data with outliers replaced by NAN
- outliers.
The function uses IQR to detect outliers.
- :param input_data: Data to be examined for outliers.
+ :param input_series: Data to be examined for outliers.
:param outlier_const: Outlier constant.
- :type input_data: pandas.Series
+ :param window: How many preceding values to take into account.
+ :type input_series: pandas.Series
:type outlier_const: float
- :returns: Tuple: input data with outliers removed; Outliers.
- :rtype: tuple (trimmed_data, outliers)
+ :type window: int
+ :returns: Input data with NAN outliers and Outliers.
+ :rtype: (pandas.Series, pandas.Series)
"""
- upper_quartile = input_data.quantile(q=0.75)
- lower_quartile = input_data.quantile(q=0.25)
- iqr = (upper_quartile - lower_quartile) * outlier_const
- low = lower_quartile - iqr
- high = upper_quartile + iqr
+ list_data = list(input_series.items())
+ head_size = min(window, len(list_data))
+ head_list = list_data[:head_size]
trimmed_data = pd.Series()
outliers = pd.Series()
- for item in input_data.items():
- item_pd = pd.Series([item[1], ], index=[item[0], ])
- if low <= item[1] <= high:
+ for item_x, item_y in head_list:
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(item_pd)
+ for index, (item_x, item_y) in list(enumerate(list_data))[head_size:]:
+ y_rolling_list = [y for (x, y) in list_data[index - head_size:index]]
+ y_rolling_array = np.array(y_rolling_list)
+ q1 = np.percentile(y_rolling_array, 25)
+ q3 = np.percentile(y_rolling_array, 75)
+ iqr = (q3 - q1) * outlier_const
+ low = q1 - iqr
+ item_pd = pd.Series([item_y, ], index=[item_x, ])
+ if low <= item_y:
trimmed_data = trimmed_data.append(item_pd)
else:
- trimmed_data = trimmed_data.append(pd.Series([np.nan, ],
- index=[item[0], ]))
outliers = outliers.append(item_pd)
+ nan_pd = pd.Series([np.nan, ], index=[item_x, ])
+ trimmed_data = trimmed_data.append(nan_pd)
return trimmed_data, outliers
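split_outliers() replaces that single, whole-series fence with one computed over a rolling window of the preceding samples, and only the lower fence is enforced (a sudden drop is suspect, an upward spike is not); detected outliers are replaced by NaN in the trimmed series. A self-contained sketch of the same idea on hypothetical data (the values and window size are illustrative, not taken from the patch):

import numpy as np
import pandas as pd

series = pd.Series([10.0, 10.2, 9.9, 10.1, 4.0, 10.0])   # hypothetical samples
window = 3
items = list(series.items())
trimmed, outliers = {}, {}
for idx, (x, y) in enumerate(items):
    if idx < window:                      # the first `window` samples are kept as-is
        trimmed[x] = y
        continue
    prev = np.array([v for _, v in items[idx - window:idx]])
    q1, q3 = np.percentile(prev, 25), np.percentile(prev, 75)
    low = q1 - (q3 - q1) * 1.5            # only the lower fence matters here
    if y >= low:
        trimmed[x] = y
    else:                                 # outlier: keep a NaN placeholder instead
        trimmed[x] = np.nan
        outliers[x] = y
# outliers contains {4: 4.0}; trimmed keeps the other samples and a NaN at index 4
print(pd.Series(trimmed))
print(pd.Series(outliers))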
@@ -129,7 +141,7 @@ def get_files(path, extension=None, full_path=True):
:param path: Path to files.
:param extension: Extension of files to process. If it is the empty string,
- all files will be processed.
+ all files will be processed.
:param full_path: If True, the files with full path are generated.
:type path: str
:type extension: str
@@ -187,8 +199,10 @@ def execute_command(cmd):
stdout, stderr = proc.communicate()
- logging.info(stdout)
- logging.info(stderr)
+ if stdout:
+ logging.info(stdout)
+ if stderr:
+ logging.info(stderr)
if proc.returncode != 0:
logging.error(" Command execution failed.")
@@ -239,10 +253,7 @@ def archive_input_data(spec):
logging.info(" Archiving the input data files ...")
- if spec.is_debug:
- extension = spec.debug["input-format"]
- else:
- extension = spec.input["file-format"]
+ extension = spec.input["file-format"]
data_files = get_files(spec.environment["paths"]["DIR[WORKING,DATA]"],
extension=extension)
dst = spec.environment["paths"]["DIR[STATIC,ARCH]"]
@@ -253,11 +264,93 @@ def archive_input_data(spec):
makedirs(dst)
for data_file in data_files:
- logging.info(" Copying the file: {0} ...".format(data_file))
- copy(data_file, dst)
+ logging.info(" Moving the file: {0} ...".format(data_file))
+ move(data_file, dst)
except (Error, OSError) as err:
raise PresentationError("Not possible to archive the input data.",
str(err))
logging.info(" Done.")
+
+
+def classify_anomalies(data, window):
+ """Evaluates if the sample value is an outlier, regression, normal or
+ progression compared to the previous data within the window.
+ We use the intervals defined as:
+ - regress: less than trimmed moving median - 3 * stdev
+ - normal: between trimmed moving median - 3 * stdev and median + 3 * stdev
+ - progress: more than trimmed moving median + 3 * stdev
+ where stdev is trimmed moving standard deviation.
+
+ :param data: Full data set with the outliers replaced by nan.
+ :param window: Window size used to calculate moving average and moving
+ stdev.
+ :type data: pandas.Series
+ :type window: int
+ :returns: Evaluated results.
+ :rtype: list
+ """
+
+ if data.size < 3:
+ return None
+
+ win_size = data.size if data.size < window else window
+ tmm = data.rolling(window=win_size, min_periods=2).median()
+ tmstd = data.rolling(window=win_size, min_periods=2).std()
+
+ classification = ["normal", ]
+ first = True
+ for build, value in data.iteritems():
+ if first:
+ first = False
+ continue
+ if np.isnan(value) or np.isnan(tmm[build]) or np.isnan(tmstd[build]):
+ classification.append("outlier")
+ elif value < (tmm[build] - 3 * tmstd[build]):
+ classification.append("regression")
+ elif value > (tmm[build] + 3 * tmstd[build]):
+ classification.append("progression")
+ else:
+ classification.append("normal")
+ return classification
+
+
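As a worked illustration of the classification bands described in the docstring above: the rolling median and standard deviation are taken over the window ending at each sample (min_periods=2, as in classify_anomalies), and the sample is labelled by where it falls relative to median +/- 3 * stdev. The data and window size below are hypothetical:

import numpy as np
import pandas as pd

data = pd.Series([10.0, 10.1, 9.9, 10.0, 10.1, 9.9, 10.0, 10.1, 9.9, 10.0, 6.0])
window = 14
win_size = data.size if data.size < window else window   # 11 for this data
tmm = data.rolling(window=win_size, min_periods=2).median()
tmstd = data.rolling(window=win_size, min_periods=2).std()

labels = ["normal"]                       # the first sample has no history
for build, value in list(data.items())[1:]:
    if np.isnan(value) or np.isnan(tmm[build]) or np.isnan(tmstd[build]):
        labels.append("outlier")
    elif value < tmm[build] - 3 * tmstd[build]:
        labels.append("regression")
    elif value > tmm[build] + 3 * tmstd[build]:
        labels.append("progression")
    else:
        labels.append("normal")
# labels: ten "normal" samples followed by "regression" for the drop to 6.0
print(labels)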
+class Worker(multiprocessing.Process):
+ """Worker class used to process tasks in separate parallel processes.
+ """
+
+ def __init__(self, work_queue, data_queue, func):
+ """Initialization.
+
+ :param work_queue: Queue with items to process.
+ :param data_queue: Shared memory between processes. Queue which keeps
+ the result data. This data is then read by the main process and used
+ in further processing.
+ :param func: Function which is executed by the worker.
+ :type work_queue: multiprocessing.JoinableQueue
+ :type data_queue: multiprocessing.Manager().Queue()
+ :type func: Callable object
+ """
+ super(Worker, self).__init__()
+ self._work_queue = work_queue
+ self._data_queue = data_queue
+ self._func = func
+
+ def run(self):
+ """Method representing the process's activity.
+ """
+
+ while True:
+ try:
+ self.process(self._work_queue.get())
+ finally:
+ self._work_queue.task_done()
+
+ def process(self, item_to_process):
+ """Method executed by the runner.
+
+ :param item_to_process: Data to be processed by the function.
+ :type item_to_process: tuple
+ """
+ self._func(self.pid, self._data_queue, *item_to_process)
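A minimal wiring sketch for the Worker class above; the task function, the item format and the number of workers are illustrative assumptions, not taken from this patch:

import multiprocessing

from utils import Worker          # assumes utils.py (this module) is importable

def _example_task(pid, data_queue, value):
    # Worker.process() calls func(pid, data_queue, *item), so every item put
    # on the work queue must be a tuple of the remaining positional arguments.
    data_queue.put((pid, value, value * value))

def main():
    manager = multiprocessing.Manager()
    work_queue = multiprocessing.JoinableQueue()
    data_queue = manager.Queue()

    workers = list()
    for _ in range(2):
        worker = Worker(work_queue, data_queue, _example_task)
        worker.daemon = True          # run() loops forever; do not block exit
        worker.start()
        workers.append(worker)

    for value in range(5):
        work_queue.put((value, ))     # one-element tuple -> one extra argument

    work_queue.join()                 # wait until task_done() ran for every item

    while not data_queue.empty():
        print(data_queue.get())

    for worker in workers:
        worker.terminate()            # the run() loop never returns on its own

if __name__ == "__main__":
    main()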