-rw-r--r--  docs/cpta/data/index.rst                                             18
-rw-r--r--  docs/cpta/stats/dpdk_mrr.rst                                          6
-rw-r--r--  resources/tools/presentation/generator_cpta.py                       59
-rw-r--r--  resources/tools/presentation/specifications/trending/elements.yaml    2
-rw-r--r--  resources/tools/presentation/specifications/trending/layouts.yaml     2
5 files changed, 69 insertions, 18 deletions
diff --git a/docs/cpta/data/index.rst b/docs/cpta/data/index.rst
index bf9b485891..4c47f0fde6 100644
--- a/docs/cpta/data/index.rst
+++ b/docs/cpta/data/index.rst
@@ -6,10 +6,6 @@ in the CSV formats:
**VPP MRR**
- - 3n-hsw
-
- - `CSV format <../_static/vpp/csit-vpp-perf-mrr-daily-master-trending.csv>`_
-
- 3n-skx
- `CSV format <../_static/vpp/csit-vpp-perf-mrr-daily-master-3n-skx-trending.csv>`_
@@ -44,28 +40,26 @@ in the CSV formats:
**VPP NDRPDR**
- - 3n-hsw
-
- - `CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-3n-hsw-trending.csv>`_
-
- 3n-skx
- `CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-3n-skx-trending.csv>`_
+ - `Latency (P50 at 50% load, direction 1) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-3n-skx-lat-P50-50-d1.csv>`_
+ - `Latency (P50 at 50% load, direction 2) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-3n-skx-lat-P50-50-d2.csv>`_
- 2n-skx
- `CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-skx-trending.csv>`_
+ - `Latency (P50 at 50% load, direction 1) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-skx-lat-P50-50-d1.csv>`_
+ - `Latency (P50 at 50% load, direction 2) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-skx-lat-P50-50-d2.csv>`_
- 2n-clx
- `CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-clx-trending.csv>`_
+ - `Latency (P50 at 50% load, direction 1) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-clx-lat-P50-50-d1.csv>`_
+ - `Latency (P50 at 50% load, direction 2) in CSV format <../_static/vpp/csit-vpp-perf-ndrpdr-weekly-master-2n-clx-lat-P50-50-d2.csv>`_
**DPDK MRR**
- - 3n-hsw
-
- - `CSV format <../_static/vpp/csit-dpdk-perf-mrr-weekly-master-trending.csv>`_
-
- 3n-skx
- `CSV format <../_static/vpp/csit-dpdk-perf-mrr-weekly-master-3n-skx-trending.csv>`_
diff --git a/docs/cpta/stats/dpdk_mrr.rst b/docs/cpta/stats/dpdk_mrr.rst
index 23f7cfbe82..9eeeaba92f 100644
--- a/docs/cpta/stats/dpdk_mrr.rst
+++ b/docs/cpta/stats/dpdk_mrr.rst
@@ -45,7 +45,7 @@ DPDK MRR
<center>
Links to builds:
<a href="https://jenkins.fd.io/view/csit/job/csit-vpp-perf-mrr-daily-master-2n-zn2" target="_blank">csit-ref</a>
- <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-2n-zn2-mrr.html"></iframe>
+ <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-dpdk-2n-zn2-mrr.html"></iframe>
<p><br></p>
</center>
@@ -57,7 +57,7 @@ DPDK MRR
<center>
Links to builds:
<a href="https://jenkins.fd.io/view/csit/job/csit-vpp-perf-mrr-daily-master-3n-tsh" target="_blank">csit-ref</a>
- <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-3n-tsh-mrr.html"></iframe>
+ <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-dpdk-3n-tsh-mrr.html"></iframe>
<p><br></p>
</center>
@@ -69,6 +69,6 @@ DPDK MRR
<center>
Links to builds:
<a href="https://jenkins.fd.io/view/csit/job/csit-vpp-perf-mrr-daily-master-2n-tx2" target="_blank">csit-ref</a>
- <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-2n-tx2-mrr.html"></iframe>
+ <iframe width="1100" height="800" frameborder="0" scrolling="no" src="../_static/vpp/stats-dpdk-2n-tx2-mrr.html"></iframe>
<p><br></p>
</center>
diff --git a/resources/tools/presentation/generator_cpta.py b/resources/tools/presentation/generator_cpta.py
index 4b10440257..997333a769 100644
--- a/resources/tools/presentation/generator_cpta.py
+++ b/resources/tools/presentation/generator_cpta.py
@@ -398,7 +398,7 @@ def _generate_all_charts(spec, input_data):
data = input_data.filter_tests_by_name(
graph,
- params=[u"type", u"result", u"throughput", u"tags"],
+ params=[u"type", u"result", u"throughput", u"latency", u"tags"],
continue_on_error=True
)
@@ -411,6 +411,8 @@ def _generate_all_charts(spec, input_data):
for ttype in graph.get(u"test-type", (u"mrr", )):
for core in graph.get(u"core", tuple()):
csv_tbl = list()
+ csv_tbl_lat_1 = list()
+ csv_tbl_lat_2 = list()
res = dict()
chart_data = dict()
chart_tags = dict()
@@ -426,6 +428,8 @@ def _generate_all_charts(spec, input_data):
if chart_data.get(test_id, None) is None:
chart_data[test_id] = OrderedDict()
try:
+ lat_1 = u""
+ lat_2 = u""
if ttype == u"mrr":
rate = test[u"result"][u"receive-rate"]
stdev = \
@@ -438,12 +442,23 @@ def _generate_all_charts(spec, input_data):
rate = \
test["throughput"][u"PDR"][u"LOWER"]
stdev = float(u"nan")
+ lat_1 = test[u"latency"][u"PDR50"]\
+ [u"direction1"][u"avg"]
+ lat_2 = test[u"latency"][u"PDR50"]\
+ [u"direction2"][u"avg"]
else:
continue
chart_data[test_id][int(index)] = {
u"receive-rate": rate,
u"receive-stdev": stdev
}
+ if ttype == u"pdr":
+ chart_data[test_id][int(index)].update(
+ {
+ u"lat_1": lat_1,
+ u"lat_2": lat_2
+ }
+ )
chart_tags[test_id] = \
test.get(u"tags", None)
except (KeyError, TypeError):
@@ -452,14 +467,26 @@ def _generate_all_charts(spec, input_data):
# Add items to the csv table:
for tst_name, tst_data in chart_data.items():
tst_lst = list()
+ tst_lst_lat_1 = list()
+ tst_lst_lat_2 = list()
for bld in builds_dict[job_name]:
itm = tst_data.get(int(bld), dict())
# CSIT-1180: Itm will be list, compute stats.
try:
tst_lst.append(str(itm.get(u"receive-rate", u"")))
+ tst_lst_lat_1.append(str(itm.get(u"lat_1", u"")))
+ tst_lst_lat_2.append(str(itm.get(u"lat_2", u"")))
except AttributeError:
tst_lst.append(u"")
+ tst_lst_lat_1.append(u"")
+ tst_lst_lat_2.append(u"")
csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
+ csv_tbl_lat_1.append(
+ f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
+ )
+ csv_tbl_lat_2.append(
+ f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
+ )
# Generate traces:
traces = list()
@@ -598,6 +625,8 @@ def _generate_all_charts(spec, input_data):
{
u"job_name": job_name,
u"csv_table": csv_tbl,
+ u"csv_lat_1": csv_tbl_lat_1,
+ u"csv_lat_2": csv_tbl_lat_2,
u"results": res
}
)
@@ -634,17 +663,34 @@ def _generate_all_charts(spec, input_data):
# Create the table header:
csv_tables = dict()
+ csv_tables_l1 = dict()
+ csv_tables_l2 = dict()
for job_name in builds_dict:
if csv_tables.get(job_name, None) is None:
csv_tables[job_name] = list()
+ if csv_tables_l1.get(job_name, None) is None:
+ csv_tables_l1[job_name] = list()
+ if csv_tables_l2.get(job_name, None) is None:
+ csv_tables_l2[job_name] = list()
header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
csv_tables[job_name].append(header)
+ csv_tables_l1[job_name].append(header)
+ csv_tables_l2[job_name].append(header)
build_dates = [x[0] for x in build_info[job_name].values()]
header = f"Build Date:,{u','.join(build_dates)}\n"
csv_tables[job_name].append(header)
+ csv_tables_l1[job_name].append(header)
+ csv_tables_l2[job_name].append(header)
versions = [x[1] for x in build_info[job_name].values()]
header = f"Version:,{u','.join(versions)}\n"
csv_tables[job_name].append(header)
+ csv_tables_l1[job_name].append(header)
+ csv_tables_l2[job_name].append(header)
+ testbed = [x[2] for x in build_info[job_name].values()]
+ header = f"Test bed:,{u','.join(testbed)}\n"
+ csv_tables[job_name].append(header)
+ csv_tables_l1[job_name].append(header)
+ csv_tables_l2[job_name].append(header)
for chart in spec.cpta[u"plots"]:
results = _generate_chart(chart)
@@ -653,6 +699,8 @@ def _generate_all_charts(spec, input_data):
for result in results:
csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
+ csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
+ csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
if anomaly_classifications.get(result[u"job_name"], None) is None:
anomaly_classifications[result[u"job_name"]] = dict()
@@ -691,6 +739,15 @@ def _generate_all_charts(spec, input_data):
with open(f"{file_name}.txt", u"wt") as txt_file:
txt_file.write(str(txt_table))
+ for job_name, csv_table in csv_tables_l1.items():
+ file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
+ with open(f"{file_name}.csv", u"wt") as file_handler:
+ file_handler.writelines(csv_table)
+ for job_name, csv_table in csv_tables_l2.items():
+ file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
+ with open(f"{file_name}.csv", u"wt") as file_handler:
+ file_handler.writelines(csv_table)
+
# Evaluate result:
if anomaly_classifications:
result = u"PASS"
diff --git a/resources/tools/presentation/specifications/trending/elements.yaml b/resources/tools/presentation/specifications/trending/elements.yaml
index a640401c7a..b549948a4f 100644
--- a/resources/tools/presentation/specifications/trending/elements.yaml
+++ b/resources/tools/presentation/specifications/trending/elements.yaml
@@ -69,7 +69,7 @@
- type: "plot"
title: "Statistics: DPDK 2n-clx mrr"
algorithm: "plot_statistics"
- output-file: "{DIR[STATIC,VPP]}/stats-dpdk-2n-skx-mrr"
+ output-file: "{DIR[STATIC,VPP]}/stats-dpdk-2n-clx-mrr"
data: "plot-performance-trending-dpdk-2n-clx"
layout:
layout: "plot-statistics"
diff --git a/resources/tools/presentation/specifications/trending/layouts.yaml b/resources/tools/presentation/specifications/trending/layouts.yaml
index 72bd69a652..aec150fd45 100644
--- a/resources/tools/presentation/specifications/trending/layouts.yaml
+++ b/resources/tools/presentation/specifications/trending/layouts.yaml
@@ -10,7 +10,7 @@
size: 16
autosize: True
showlegend: True
- width: 1100
+ width: 1050
height: 800
yaxis:
showticklabels: True