Diffstat (limited to 'resources/tools'):
-rw-r--r--  resources/tools/presentation/generator_plots.py   | 606
-rw-r--r--  resources/tools/presentation/input_data_parser.py |  90
-rw-r--r--  resources/tools/presentation/specification.yaml   |  55
3 files changed, 751 insertions(+), 0 deletions(-)
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 7c8a9f8628..f5bcb0abfa 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -61,10 +61,612 @@ def generate_plots(spec, data):
logging.info("Done.")
+def plot_performance_name_box(plot, input_data):
+ """Generate the plot(s) with algorithm: plot_performance_name_box
+ specified in the specification file.
+
+ :param plot: Plot to generate.
+ :param input_data: Data to process.
+ :type plot: pandas.Series
+ :type input_data: InputData
+ """
+
+ # Transform the data
+ plot_title = plot.get("title", "")
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot_title))
+ data = input_data.filter_tests_by_name(
+ plot, params=["throughput", "parent", "tags", "type"])
+ if data is None:
+ logging.error("No data.")
+ return
+
+ # Prepare the data for the plot
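+ # The keys accessed in the loop below imply the expected shape of each
+ # test item (a sketch, inferred from this function only):
+ #   NDRPDR tests: test["throughput"]["NDR" | "PDR"]["LOWER"]
+ #   SOAK tests:   test["throughput"]["LOWER"]
+ # plus test["parent"] and test["type"].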
+ y_vals = OrderedDict()
+ for job in data:
+ for build in job:
+ for test in build:
+ if y_vals.get(test["parent"], None) is None:
+ y_vals[test["parent"]] = list()
+ try:
+ if test["type"] in ("NDRPDR", ):
+ if "-pdr" in plot_title.lower():
+ y_vals[test["parent"]].\
+ append(test["throughput"]["PDR"]["LOWER"])
+ elif "-ndr" in plot_title.lower():
+ y_vals[test["parent"]]. \
+ append(test["throughput"]["NDR"]["LOWER"])
+ else:
+ continue
+ elif test["type"] in ("SOAK", ):
+ y_vals[test["parent"]].\
+ append(test["throughput"]["LOWER"])
+ else:
+ continue
+ except (KeyError, TypeError):
+ y_vals[test["parent"]].append(None)
+
+ # Add None to the lists with missing data
+ max_len = 0
+ nr_of_samples = list()
+ for val in y_vals.values():
+ if len(val) > max_len:
+ max_len = len(val)
+ nr_of_samples.append(len(val))
+ for key, val in y_vals.items():
+ if len(val) < max_len:
+ val.extend([None for _ in range(max_len - len(val))])
+
+ # Add plot traces
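+ # One box per test; throughput samples are divided by 1e6, i.e. plotted
+ # as Mpps assuming the source values are stored in pps.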
+ traces = list()
+ df = pd.DataFrame(y_vals)
+ df.head()
+ y_max = list()
+ for i, col in enumerate(df.columns):
+ tst_name = re.sub(REGEX_NIC, "",
+ col.lower().replace('-ndrpdr', '').
+ replace('2n1l-', ''))
+ name = "{nr}. ({samples:02d} run{plural}) {name}".\
+ format(nr=(i + 1),
+ samples=nr_of_samples[i],
+ plural='s' if nr_of_samples[i] > 1 else '',
+ name=tst_name)
+
+ logging.debug(name)
+ traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]),
+ y=[y / 1000000 if y else None for y in df[col]],
+ name=name,
+ hoverinfo="x+y",
+ boxpoints="outliers",
+ whiskerwidth=0))
+ try:
+ val_max = max(df[col])
+ except ValueError as err:
+ logging.error(repr(err))
+ continue
+ if val_max:
+ y_max.append(int(val_max / 1000000) + 2)
+
+ try:
+ # Create plot
+ layout = deepcopy(plot["layout"])
+ if layout.get("title", None):
+ layout["title"] = "<b>Throughput:</b> {0}". \
+ format(layout["title"])
+ if y_max:
+ layout["yaxis"]["range"] = [0, max(y_max)]
+ plpl = plgo.Figure(data=traces, layout=layout)
+
+ # Export Plot
+ file_type = plot.get("output-file-type", ".html")
+ logging.info(" Writing file '{0}{1}'.".
+ format(plot["output-file"], file_type))
+ ploff.plot(plpl, show_link=False, auto_open=False,
+ filename='{0}{1}'.format(plot["output-file"], file_type))
+ except PlotlyError as err:
+ logging.error(" Finished with error: {}".
+ format(repr(err).replace("\n", " ")))
+ return
+
+
+def plot_latency_error_bars_name(plot, input_data):
+ """Generate the plot(s) with algorithm: plot_latency_error_bars_name
+ specified in the specification file.
+
+ :param plot: Plot to generate.
+ :param input_data: Data to process.
+ :type plot: pandas.Series
+ :type input_data: InputData
+ """
+
+ # Transform the data
+ plot_title = plot.get("title", "")
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot_title))
+ data = input_data.filter_tests_by_name(
+ plot, params=["latency", "parent", "tags", "type"])
+ if data is None:
+ logging.error("No data.")
+ return
+
+ # Prepare the data for the plot
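+ # Latency values are read from
+ # test["latency"][ttype]["direction1" | "direction2"]["min" | "avg" | "max"];
+ # six lists per test keep min/avg/max for both directions.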
+ y_tmp_vals = OrderedDict()
+ for job in data:
+ for build in job:
+ for test in build:
+ try:
+ logging.debug("test['latency']: {0}\n".
+ format(test["latency"]))
+ except ValueError as err:
+ logging.warning(repr(err))
+ if y_tmp_vals.get(test["parent"], None) is None:
+ y_tmp_vals[test["parent"]] = [
+ list(), # direction1, min
+ list(), # direction1, avg
+ list(), # direction1, max
+ list(), # direction2, min
+ list(), # direction2, avg
+ list() # direction2, max
+ ]
+ try:
+ if test["type"] in ("NDRPDR", ):
+ if "-pdr" in plot_title.lower():
+ ttype = "PDR"
+ elif "-ndr" in plot_title.lower():
+ ttype = "NDR"
+ else:
+ logging.warning("Invalid test type: {0}".
+ format(test["type"]))
+ continue
+ y_tmp_vals[test["parent"]][0].append(
+ test["latency"][ttype]["direction1"]["min"])
+ y_tmp_vals[test["parent"]][1].append(
+ test["latency"][ttype]["direction1"]["avg"])
+ y_tmp_vals[test["parent"]][2].append(
+ test["latency"][ttype]["direction1"]["max"])
+ y_tmp_vals[test["parent"]][3].append(
+ test["latency"][ttype]["direction2"]["min"])
+ y_tmp_vals[test["parent"]][4].append(
+ test["latency"][ttype]["direction2"]["avg"])
+ y_tmp_vals[test["parent"]][5].append(
+ test["latency"][ttype]["direction2"]["max"])
+ else:
+ logging.warning("Invalid test type: {0}".
+ format(test["type"]))
+ continue
+ except (KeyError, TypeError) as err:
+ logging.warning(repr(err))
+
+ x_vals = list()
+ y_vals = list()
+ y_mins = list()
+ y_maxs = list()
+ nr_of_samples = list()
+ for key, val in y_tmp_vals.items():
+ name = re.sub(REGEX_NIC, "", key.replace('-ndrpdr', '').
+ replace('2n1l-', ''))
+ x_vals.append(name) # dir 1
+ y_vals.append(mean(val[1]) if val[1] else None)
+ y_mins.append(mean(val[0]) if val[0] else None)
+ y_maxs.append(mean(val[2]) if val[2] else None)
+ nr_of_samples.append(len(val[1]) if val[1] else 0)
+ x_vals.append(name) # dir 2
+ y_vals.append(mean(val[4]) if val[4] else None)
+ y_mins.append(mean(val[3]) if val[3] else None)
+ y_maxs.append(mean(val[5]) if val[5] else None)
+ nr_of_samples.append(len(val[3]) if val[3] else 0)
+
+ traces = list()
+ annotations = list()
+
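+ # x_vals/y_vals hold two consecutive entries per test: even indices carry
+ # direction1 (annotated "W-E"), odd indices direction2 (annotated "E-W").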
+ for idx in range(len(x_vals)):
+ if not bool(int(idx % 2)):
+ direction = "West-East"
+ else:
+ direction = "East-West"
+ hovertext = ("No. of Runs: {nr}<br>"
+ "Test: {test}<br>"
+ "Direction: {dir}<br>".format(test=x_vals[idx],
+ dir=direction,
+ nr=nr_of_samples[idx]))
+ if isinstance(y_maxs[idx], float):
+ hovertext += "Max: {max:.2f}uSec<br>".format(max=y_maxs[idx])
+ if isinstance(y_vals[idx], float):
+ hovertext += "Mean: {avg:.2f}uSec<br>".format(avg=y_vals[idx])
+ if isinstance(y_mins[idx], float):
+ hovertext += "Min: {min:.2f}uSec".format(min=y_mins[idx])
+
+ if isinstance(y_maxs[idx], float) and isinstance(y_vals[idx], float):
+ array = [y_maxs[idx] - y_vals[idx], ]
+ else:
+ array = [None, ]
+ if isinstance(y_mins[idx], float) and isinstance(y_vals[idx], float):
+ arrayminus = [y_vals[idx] - y_mins[idx], ]
+ else:
+ arrayminus = [None, ]
+ traces.append(plgo.Scatter(
+ x=[idx, ],
+ y=[y_vals[idx], ],
+ name=x_vals[idx],
+ legendgroup=x_vals[idx],
+ showlegend=bool(int(idx % 2)),
+ mode="markers",
+ error_y=dict(
+ type='data',
+ symmetric=False,
+ array=array,
+ arrayminus=arrayminus,
+ color=COLORS[int(idx / 2)]
+ ),
+ marker=dict(
+ size=10,
+ color=COLORS[int(idx / 2)],
+ ),
+ text=hovertext,
+ hoverinfo="text",
+ ))
+ annotations.append(dict(
+ x=idx,
+ y=0,
+ xref="x",
+ yref="y",
+ xanchor="center",
+ yanchor="top",
+ text="E-W" if bool(int(idx % 2)) else "W-E",
+ font=dict(
+ size=16,
+ ),
+ align="center",
+ showarrow=False
+ ))
+
+ try:
+ # Create plot
+ file_type = plot.get("output-file-type", ".html")
+ logging.info(" Writing file '{0}{1}'.".
+ format(plot["output-file"], file_type))
+ layout = deepcopy(plot["layout"])
+ if layout.get("title", None):
+ layout["title"] = "<b>Latency:</b> {0}".\
+ format(layout["title"])
+ layout["annotations"] = annotations
+ plpl = plgo.Figure(data=traces, layout=layout)
+
+ # Export Plot
+ ploff.plot(plpl,
+ show_link=False, auto_open=False,
+ filename='{0}{1}'.format(plot["output-file"], file_type))
+ except PlotlyError as err:
+ logging.error(" Finished with error: {}".
+ format(str(err).replace("\n", " ")))
+ return
+
+
+def plot_throughput_speedup_analysis_name(plot, input_data):
+ """Generate the plot(s) with algorithm:
+ plot_throughput_speedup_analysis_name
+ specified in the specification file.
+
+ :param plot: Plot to generate.
+ :param input_data: Data to process.
+ :type plot: pandas.Series
+ :type input_data: InputData
+ """
+
+ # Transform the data
+ plot_title = plot.get("title", "")
+ logging.info(" Creating the data set for the {0} '{1}'.".
+ format(plot.get("type", ""), plot_title))
+ data = input_data.filter_tests_by_name(
+ plot, params=["throughput", "parent", "tags", "type"])
+ if data is None:
+ logging.error("No data.")
+ return
+
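+ # Group the throughput samples per test and per core count; the "1C", "2C"
+ # and "4C" tags select the "1", "2" and "4" buckets used by the speedup
+ # calculation below.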
+ y_vals = OrderedDict()
+ for job in data:
+ for build in job:
+ for test in build:
+ if y_vals.get(test["parent"], None) is None:
+ y_vals[test["parent"]] = {"1": list(),
+ "2": list(),
+ "4": list()}
+ try:
+ if test["type"] in ("NDRPDR",):
+ if "-pdr" in plot_title.lower():
+ ttype = "PDR"
+ elif "-ndr" in plot_title.lower():
+ ttype = "NDR"
+ else:
+ continue
+ if "1C" in test["tags"]:
+ y_vals[test["parent"]]["1"]. \
+ append(test["throughput"][ttype]["LOWER"])
+ elif "2C" in test["tags"]:
+ y_vals[test["parent"]]["2"]. \
+ append(test["throughput"][ttype]["LOWER"])
+ elif "4C" in test["tags"]:
+ y_vals[test["parent"]]["4"]. \
+ append(test["throughput"][ttype]["LOWER"])
+ except (KeyError, TypeError):
+ pass
+
+ if not y_vals:
+ logging.warning("No data for the plot '{}'".
+ format(plot.get("title", "")))
+ return
+
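+ # Reference for "perfect" scaling: the best per-core rate (in Mpps) seen
+ # for each test; the ideal multi-core values are this rate multiplied by
+ # the core count.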
+ y_1c_max = dict()
+ for test_name, test_vals in y_vals.items():
+ for key, test_val in test_vals.items():
+ if test_val:
+ avg_val = sum(test_val) / len(test_val)
+ y_vals[test_name][key] = (avg_val, len(test_val))
+ ideal = avg_val / (int(key) * 1000000.0)
+ if test_name not in y_1c_max or ideal > y_1c_max[test_name]:
+ y_1c_max[test_name] = ideal
+
+ vals = OrderedDict()
+ y_max = list()
+ nic_limit = 0
+ lnk_limit = 0
+ pci_limit = plot["limits"]["pci"]["pci-g3-x8"]
+ for test_name, test_vals in y_vals.items():
+ try:
+ if test_vals["1"][1]:
+ name = re.sub(REGEX_NIC, "", test_name.replace('-ndrpdr', '').
+ replace('2n1l-', ''))
+ vals[name] = OrderedDict()
+ y_val_1 = test_vals["1"][0] / 1000000.0
+ y_val_2 = test_vals["2"][0] / 1000000.0 if test_vals["2"][0] \
+ else None
+ y_val_4 = test_vals["4"][0] / 1000000.0 if test_vals["4"][0] \
+ else None
+
+ vals[name]["val"] = [y_val_1, y_val_2, y_val_4]
+ vals[name]["rel"] = [1.0, None, None]
+ vals[name]["ideal"] = [y_1c_max[test_name],
+ y_1c_max[test_name] * 2,
+ y_1c_max[test_name] * 4]
+ vals[name]["diff"] = [(y_val_1 - y_1c_max[test_name]) * 100 /
+ y_val_1, None, None]
+ vals[name]["count"] = [test_vals["1"][1],
+ test_vals["2"][1],
+ test_vals["4"][1]]
+
+ try:
+ val_max = max(vals[name]["val"])
+ except ValueError as err:
+ logging.error(repr(err))
+ continue
+ if val_max:
+ y_max.append(val_max)
+
+ if y_val_2:
+ vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2)
+ vals[name]["diff"][1] = \
+ (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2
+ if y_val_4:
+ vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2)
+ vals[name]["diff"][2] = \
+ (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4
+ except IndexError as err:
+ logging.warning("No data for '{0}'".format(test_name))
+ logging.warning(repr(err))
+
+ # Limits:
+ if "x520" in test_name:
+ limit = plot["limits"]["nic"]["x520"]
+ elif "x710" in test_name:
+ limit = plot["limits"]["nic"]["x710"]
+ elif "xxv710" in test_name:
+ limit = plot["limits"]["nic"]["xxv710"]
+ elif "xl710" in test_name:
+ limit = plot["limits"]["nic"]["xl710"]
+ elif "x553" in test_name:
+ limit = plot["limits"]["nic"]["x553"]
+ else:
+ limit = 0
+ if limit > nic_limit:
+ nic_limit = limit
+
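+ # Link limit: selected by link speed and doubled for two-port ("ge2p")
+ # test names.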
+ mul = 2 if "ge2p" in test_name else 1
+ if "10ge" in test_name:
+ limit = plot["limits"]["link"]["10ge"] * mul
+ elif "25ge" in test_name:
+ limit = plot["limits"]["link"]["25ge"] * mul
+ elif "40ge" in test_name:
+ limit = plot["limits"]["link"]["40ge"] * mul
+ elif "100ge" in test_name:
+ limit = plot["limits"]["link"]["100ge"] * mul
+ else:
+ limit = 0
+ if limit > lnk_limit:
+ lnk_limit = limit
+
+ traces = list()
+ annotations = list()
+ x_vals = [1, 2, 4]
+
+ # Limits:
+ try:
+ threshold = 1.1 * max(y_max) # 10%
+ except ValueError as err:
+ logging.error(err)
+ return
+ nic_limit /= 1000000.0
+ traces.append(plgo.Scatter(
+ x=x_vals,
+ y=[nic_limit, ] * len(x_vals),
+ name="NIC: {0:.2f}Mpps".format(nic_limit),
+ showlegend=False,
+ mode="lines",
+ line=dict(
+ dash="dot",
+ color=COLORS[-1],
+ width=1),
+ hoverinfo="none"
+ ))
+ annotations.append(dict(
+ x=1,
+ y=nic_limit,
+ xref="x",
+ yref="y",
+ xanchor="left",
+ yanchor="bottom",
+ text="NIC: {0:.2f}Mpps".format(nic_limit),
+ font=dict(
+ size=14,
+ color=COLORS[-1],
+ ),
+ align="left",
+ showarrow=False
+ ))
+ y_max.append(nic_limit)
+
+ lnk_limit /= 1000000.0
+ if lnk_limit < threshold:
+ traces.append(plgo.Scatter(
+ x=x_vals,
+ y=[lnk_limit, ] * len(x_vals),
+ name="Link: {0:.2f}Mpps".format(lnk_limit),
+ showlegend=False,
+ mode="lines",
+ line=dict(
+ dash="dot",
+ color=COLORS[-2],
+ width=1),
+ hoverinfo="none"
+ ))
+ annotations.append(dict(
+ x=1,
+ y=lnk_limit,
+ xref="x",
+ yref="y",
+ xanchor="left",
+ yanchor="bottom",
+ text="Link: {0:.2f}Mpps".format(lnk_limit),
+ font=dict(
+ size=14,
+ color=COLORS[-2],
+ ),
+ align="left",
+ showarrow=False
+ ))
+ y_max.append(lnk_limit)
+
+ pci_limit /= 1000000.0
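+ # Show the PCIe limit only when it fits under the threshold and is not
+ # (nearly) identical to the link limit.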
+ if (pci_limit < threshold and
+ (pci_limit < lnk_limit * 0.95 or pci_limit > lnk_limit * 1.05)):
+ traces.append(plgo.Scatter(
+ x=x_vals,
+ y=[pci_limit, ] * len(x_vals),
+ name="PCIe: {0:.2f}Mpps".format(pci_limit),
+ showlegend=False,
+ mode="lines",
+ line=dict(
+ dash="dot",
+ color=COLORS[-3],
+ width=1),
+ hoverinfo="none"
+ ))
+ annotations.append(dict(
+ x=1,
+ y=pci_limit,
+ xref="x",
+ yref="y",
+ xanchor="left",
+ yanchor="bottom",
+ text="PCIe: {0:.2f}Mpps".format(pci_limit),
+ font=dict(
+ size=14,
+ color=COLORS[-3],
+ ),
+ align="left",
+ showarrow=False
+ ))
+ y_max.append(pci_limit)
+
+ # Perfect and measured:
+ cidx = 0
+ for name, val in vals.items():
+ hovertext = list()
+ try:
+ for idx in range(len(val["val"])):
+ htext = ""
+ if isinstance(val["val"][idx], float):
+ htext += "No. of Runs: {1}<br>" \
+ "Mean: {0:.2f}Mpps<br>".format(val["val"][idx],
+ val["count"][idx])
+ if isinstance(val["diff"][idx], float):
+ htext += "Diff: {0:.0f}%<br>".format(
+ round(val["diff"][idx]))
+ if isinstance(val["rel"][idx], float):
+ htext += "Speedup: {0:.2f}".format(val["rel"][idx])
+ hovertext.append(htext)
+ traces.append(plgo.Scatter(x=x_vals,
+ y=val["val"],
+ name=name,
+ legendgroup=name,
+ mode="lines+markers",
+ line=dict(
+ color=COLORS[cidx],
+ width=2),
+ marker=dict(
+ symbol="circle",
+ size=10
+ ),
+ text=hovertext,
+ hoverinfo="text+name"
+ ))
+ traces.append(plgo.Scatter(x=x_vals,
+ y=val["ideal"],
+ name="{0} perfect".format(name),
+ legendgroup=name,
+ showlegend=False,
+ mode="lines",
+ line=dict(
+ color=COLORS[cidx],
+ width=2,
+ dash="dash"),
+ text=["Perfect: {0:.2f}Mpps".format(y)
+ for y in val["ideal"]],
+ hoverinfo="text"
+ ))
+ cidx += 1
+ except (IndexError, ValueError, KeyError) as err:
+ logging.warning("No data for '{0}'".format(name))
+ logging.warning(repr(err))
+
+ try:
+ # Create plot
+ file_type = plot.get("output-file-type", ".html")
+ logging.info(" Writing file '{0}{1}'.".
+ format(plot["output-file"], file_type))
+ layout = deepcopy(plot["layout"])
+ if layout.get("title", None):
+ layout["title"] = "<b>Speedup Multi-core:</b> {0}". \
+ format(layout["title"])
+ layout["yaxis"]["range"] = [0, int(max(y_max) * 1.1)]
+ layout["annotations"].extend(annotations)
+ plpl = plgo.Figure(data=traces, layout=layout)
+
+ # Export Plot
+ ploff.plot(plpl,
+ show_link=False, auto_open=False,
+ filename='{0}{1}'.format(plot["output-file"], file_type))
+ except PlotlyError as err:
+ logging.error(" Finished with error: {}".
+ format(repr(err).replace("\n", " ")))
+ return
+
+
def plot_performance_box(plot, input_data):
"""Generate the plot(s) with algorithm: plot_performance_box
specified in the specification file.
+ TODO: Remove when not needed.
+
:param plot: Plot to generate.
:param input_data: Data to process.
:type plot: pandas.Series
@@ -439,6 +1041,8 @@ def plot_latency_error_bars(plot, input_data):
"""Generate the plot(s) with algorithm: plot_latency_error_bars
specified in the specification file.
+ TODO: Remove when not needed.
+
:param plot: Plot to generate.
:param input_data: Data to process.
:type plot: pandas.Series
@@ -650,6 +1254,8 @@ def plot_throughput_speedup_analysis(plot, input_data):
plot_throughput_speedup_analysis
specified in the specification file.
+ TODO: Remove when not needed.
+
:param plot: Plot to generate.
:param input_data: Data to process.
:type plot: pandas.Series
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index bbbf0a9ae0..f47f1bc6df 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -1497,6 +1497,96 @@ class InputData(object):
"tags are enclosed by apostrophes.".format(cond))
return None
+ def filter_tests_by_name(self, element, params=None, data_set="tests",
+ continue_on_error=False):
+ """Filter required data from the given jobs and builds.
+
+ The output data structure is:
+
+ - job 1
+ - build 1
+ - test (or suite) 1 ID:
+ - param 1
+ - param 2
+ ...
+ - param n
+ ...
+ - test (or suite) n ID:
+ ...
+ ...
+ - build n
+ ...
+ - job n
+
+ :param element: Element which will use the filtered data.
+ :param params: Parameters which will be included in the output. If None,
+ all parameters are included.
+ :param data_set: The set of data to be filtered: tests, suites,
+ metadata.
+ :param continue_on_error: Continue if there is an error while reading the
+ data. The item will be empty then.
+ :type element: pandas.Series
+ :type params: list
+ :type data_set: str
+ :type continue_on_error: bool
+ :returns: Filtered data.
+ :rtype: pandas.Series
+ """
+
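+ # Hypothetical example of an element driving this filter (names made up):
+ #   element["data"]    = {"some-job": [1, 2]}
+ #   element["include"] = ["tests\\.vpp\\.perf\\..*l2xcbase.*ndrpdr"]
+ # Each "include" entry is compiled as a lower-cased regular expression and
+ # matched against the test IDs of the selected data set.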
+ include = element.get("include", None)
+ if not include:
+ logging.warning("No tests to include, skipping the element.")
+ return None
+
+ if params is None:
+ params = element.get("parameters", None)
+ if params:
+ params.append("type")
+
+ data = pd.Series()
+ try:
+ for job, builds in element["data"].items():
+ data[job] = pd.Series()
+ for build in builds:
+ data[job][str(build)] = pd.Series()
+ for test in include:
+ try:
+ reg_ex = re.compile(str(test).lower())
+ for test_ID in self.data[job][str(build)]\
+ [data_set].keys():
+ if re.match(reg_ex, str(test_ID).lower()):
+ test_data = self.data[job][str(build)]\
+ [data_set][test_ID]
+ data[job][str(build)][test_ID] = pd.Series()
+ if params is None:
+ for param, val in test_data.items():
+ data[job][str(build)][test_ID]\
+ [param] = val
+ else:
+ for param in params:
+ try:
+ data[job][str(build)][test_ID]\
+ [param] = test_data[param]
+ except KeyError:
+ data[job][str(build)][test_ID]\
+ [param] = "No Data"
+ except KeyError as err:
+ logging.error("{err!r}".format(err=err))
+ if continue_on_error:
+ continue
+ else:
+ return None
+ return data
+
+ except (KeyError, IndexError, ValueError) as err:
+ logging.error("Missing mandatory parameter in the element "
+ "specification: {err!r}".format(err=err))
+ return None
+ except AttributeError as err:
+ logging.error("{err!r}".format(err=err))
+ return None
+
+
@staticmethod
def merge_data(data):
"""Merge data from more jobs and builds to a simple data structure.
diff --git a/resources/tools/presentation/specification.yaml b/resources/tools/presentation/specification.yaml
index 21ce8542bf..02141ac162 100644
--- a/resources/tools/presentation/specification.yaml
+++ b/resources/tools/presentation/specification.yaml
@@ -6300,6 +6300,61 @@
################################################################################
+# Example plots
+
+# Packet Throughput - VPP L2 3n-skx-x710 base and scale
+- type: "plot"
+ title: "Throughput: l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr"
+ algorithm: "plot_performance_name_box"
+ output-file: "{DIR[STATIC,VPP]}/l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr"
+ data: "plot-vpp-throughput-lat-tsa-3n-skx"
+ include:
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Patch-Ndrpdr.64B-2t1c-eth-l2patch-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Xcbase-Ndrpdr.64B-2t1c-eth-l2xcbase-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdbasemaclrn-Ndrpdr.64B-2t1c-eth-l2bdbasemaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale10Kmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale10kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale100Kmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale100kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale1Mmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale1mmaclrn-ndrpdr"
+ layout:
+ title: "l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr"
+ layout: "plot-throughput"
+
+# Packet Latency - VPP L2 3n-skx-x710 base and scale
+- type: "plot"
+ title: "Latency: l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr-lat"
+ algorithm: "plot_latency_error_bars_name"
+ output-file: "{DIR[STATIC,VPP]}/l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr-lat"
+ data: "plot-vpp-throughput-lat-tsa-3n-skx"
+ include:
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Patch-Ndrpdr.64B-2t1c-eth-l2patch-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Xcbase-Ndrpdr.64B-2t1c-eth-l2xcbase-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdbasemaclrn-Ndrpdr.64B-2t1c-eth-l2bdbasemaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale10Kmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale10kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale100Kmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale100kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale1Mmaclrn-Ndrpdr.64B-2t1c-eth-l2bdscale1mmaclrn-ndrpdr"
+ layout:
+ title: "l2sw-3n-skx-x710-64b-2t1c-base_and_scale-ndr"
+ layout: "plot-latency"
+
+# Speedup - VPP L2 3n-skx-x710 base and scale
+- type: "plot"
+ title: "Speedup: l2sw-3n-skx-x710-64b-base_and_scale-ndr-tsa"
+ algorithm: "plot_throughput_speedup_analysis_name"
+ output-file: "{DIR[STATIC,VPP]}/l2sw-3n-skx-x710-64b-base_and_scale-ndr-tsa"
+ data: "plot-vpp-throughput-lat-tsa-3n-skx"
+ include:
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Patch-Ndrpdr.64B-.t.c-eth-l2patch-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Xcbase-Ndrpdr.64B-.t.c-eth-l2xcbase-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdbasemaclrn-Ndrpdr.64B-.t.c-eth-l2bdbasemaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale10Kmaclrn-Ndrpdr.64B-.t.c-eth-l2bdscale10kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale100Kmaclrn-Ndrpdr.64B-.t.c-eth-l2bdscale100kmaclrn-ndrpdr"
+ - "Tests.Vpp.Perf.L2.10Ge2P1X710-Eth-L2Bdscale1Mmaclrn-Ndrpdr.64B-.t.c-eth-l2bdscale1mmaclrn-ndrpdr"
+ layout:
+ title: "l2sw-3n-skx-x710-64b-base_and_scale-ndr"
+ layout: "plot-throughput-speedup-analysis"
+
+################################################################################
+
# Packet Throughput - VPP L2 3n-hsw-x520 base and scale
- type: "plot"
title: "VPP Throughput: l2sw-3n-hsw-x520-64b-1t1c-base_and_scale-ndr"