From 0a7c191db93f3de6c3169fa4de5fb21d02bb3202 Mon Sep 17 00:00:00 2001
From: Tibor Frank
Date: Thu, 15 Nov 2018 13:35:12 +0100
Subject: Report: Change versioning and legend of graphs

Change-Id: I6381c916582509c45f12d4e6d1d63ffeffc1eaf5
Signed-off-by: Tibor Frank
(cherry picked from commit b8287ec997b2d426818fc095f2e4b49ec95d9f49)
---
 docs/report/introduction/report_history.rst       | 31 ++++++++-----
 .../packet_throughput_graphs/vm_vhost.rst         |  2 +
 resources/tools/presentation/conf.py              |  4 +-
 resources/tools/presentation/generator_CPTA.py    |  2 +-
 resources/tools/presentation/generator_plots.py   | 53 +++++++++++-----------
 resources/tools/presentation/generator_report.py  |  8 ++--
 resources/tools/presentation/run_report.sh        |  2 +-
 7 files changed, 56 insertions(+), 46 deletions(-)

diff --git a/docs/report/introduction/report_history.rst b/docs/report/introduction/report_history.rst
index afe0fd6937..a38ac74c44 100644
--- a/docs/report/introduction/report_history.rst
+++ b/docs/report/introduction/report_history.rst
@@ -1,18 +1,25 @@
-Report History
-==============
+FD.io CSIT-18.10 Test Report History
+====================================
 
 +---------+--------------------------------------------------------------------+
 | Version | Change                                                             |
 +=========+====================================================================+
-| 1.2     | - Add automatic wrapping of long test names in graphs             |
+| .w47    | 1. Automatic wrapping of long test names in graphs added.          |
+|         | 2. Data and time format in the header changed                      |
+|         | 3. Versioning changed.                                             |
+|         |                                                                    |
 +---------+--------------------------------------------------------------------+
-| 1.1     | - Add dot1q KVM VMs vhost-user tests to the VPP performance        |
-|         |   graphs.                                                          |
-|         | - Add the information about the number of runs used to generate    |
-|         |   the plots.                                                       |
-|         | - Add the test results:                                            |
-|         |   - K8s Container Memif,                                           |
-|         |   - VPP on 3n-hsw testbed.                                         |
+| .w46    | 1. dot1q KVM VMs vhost-user tests added to                         |
+|         |    :ref:`KVM_VMs_vhost`.                                           |
+|         |                                                                    |
+|         | 2. Added number of test runs used to generate data for all graphs  |
+|         |    (throughput, speedup multi-core and latency).                   |
+|         |                                                                    |
+|         | 3. Added more test runs:                                           |
+|         |                                                                    |
+|         |    a. K8s Container Memif,                                         |
+|         |    b. VPP on 3n-hsw testbed.                                       |
+|         |                                                                    |
++---------+--------------------------------------------------------------------+
+| .w45    | Initial version                                                    |
 +---------+--------------------------------------------------------------------+
-| 1.0     | Initial version                                                    |
-+---------+--------------------------------------------------------------------+
\ No newline at end of file
diff --git a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
index c795e4bb69..83b4731248 100644
--- a/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
+++ b/docs/report/vpp_performance_tests/packet_throughput_graphs/vm_vhost.rst
@@ -28,6 +28,8 @@
 
 
 
+.. _KVM_VMs_vhost:
+
 KVM VMs vhost-user
 ==================
 
diff --git a/resources/tools/presentation/conf.py b/resources/tools/presentation/conf.py
index 6f25191bc1..85395ab7f8 100644
--- a/resources/tools/presentation/conf.py
+++ b/resources/tools/presentation/conf.py
@@ -45,7 +45,7 @@ source_suffix = ['.rst', '.md']
 master_doc = 'index'
 
 # General information about the project.
-project = u'FD.io CSIT-18.10.W47'
+project = u'FD.io CSIT-18.10.w47'
 copyright = u'2018, FD.io'
 author = u'FD.io CSIT'
 
@@ -105,7 +105,7 @@ rst_epilog = """
 .. _CSIT Testbed Setup: https://git.fd.io/csit/tree/resources/tools/testbed-setup/README.md?h={release}
 .. _K8s configuration files: https://github.com/FDio/csit/tree/{release}/resources/templates/kubernetes
 """.format(release='rls1810',
-           report_version='v1.2',
+           report_version='w47',
            prev_release='rls1807',
            srelease='1810',
            csitrelease='18.10',
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index 28b64d5776..f230a64c2f 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -124,7 +124,7 @@ def generate_cpta(spec, data):
     ret_code = _generate_all_charts(spec, data)
 
     cmd = HTML_BUILDER.format(
-        date=datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'),
+        date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
         working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
         build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
     execute_command(cmd)
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 03cee06cb0..2ba2dc03e5 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -144,21 +144,22 @@ def plot_performance_box(plot, input_data):
     df.head()
     y_max = list()
     for i, col in enumerate(df.columns):
-        name = "{0}. {1}".format(i + 1, col.lower().replace('-ndrpdr', ''))
-        if len(name) > 60:
+        name = "{nr}. ({samples:02d} run{plural}) {name}".\
+            format(nr=(i + 1),
+                   samples=nr_of_samples[i],
+                   plural='s' if nr_of_samples[i] > 1 else '',
+                   name=col.lower().replace('-ndrpdr', ''))
+        if len(name) > 50:
             name_lst = name.split('-')
             name = ""
             split_name = True
             for segment in name_lst:
-                if (len(name) + len(segment) + 1) > 60 and split_name:
+                if (len(name) + len(segment) + 1) > 50 and split_name:
                     name += "<br>"
" split_name = False name += segment + '-' name = name[:-1] - name = "{name} ({samples} run{plural})".\ - format(name=name, - samples=nr_of_samples[i], - plural='s' if nr_of_samples[i] > 1 else '') + logging.debug(name) traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), y=[y / 1000000 if y else None for y in df[col]], @@ -298,12 +299,12 @@ def plot_latency_error_bars(plot, input_data): nr_of_samples = list() for key, val in y_sorted.items(): name = "-".join(key.split("-")[1:-1]) - if len(name) > 60: + if len(name) > 50: name_lst = name.split('-') name = "" split_name = True for segment in name_lst: - if (len(name) + len(segment) + 1) > 60 and split_name: + if (len(name) + len(segment) + 1) > 50 and split_name: name += "
" split_name = False name += segment + '-' @@ -332,11 +333,11 @@ def plot_latency_error_bars(plot, input_data): direction = "West-East" else: direction = "East-West" - hovertext = ("Test: {test}
" - "Direction: {dir}
" - "No. of Runs: {nr}
".format(test=x_vals[idx], - dir=direction, - nr=nr_of_samples[idx])) + hovertext = ("No. of Runs: {nr}
" + "Test: {test}
" + "Direction: {dir}
".format(test=x_vals[idx], + dir=direction, + nr=nr_of_samples[idx])) if isinstance(y_maxs[idx], float): hovertext += "Max: {max:.2f}uSec
".format(max=y_maxs[idx]) if isinstance(y_vals[idx], float): @@ -487,12 +488,12 @@ def plot_throughput_speedup_analysis(plot, input_data): try: if test_vals["1"][1]: name = "-".join(test_name.split('-')[1:-1]) - if len(name) > 60: + if len(name) > 50: name_lst = name.split('-') name = "" split_name = True for segment in name_lst: - if (len(name) + len(segment) + 1) > 60 and split_name: + if (len(name) + len(segment) + 1) > 50 and split_name: name += "
" split_name = False name += segment + '-' @@ -694,9 +695,9 @@ def plot_throughput_speedup_analysis(plot, input_data): for idx in range(len(val["val"])): htext = "" if isinstance(val["val"][idx], float): - htext += "Mean: {0:.2f}Mpps
" \ - "No. of Runs: {1}
".format(val["val"][idx], - val["count"][idx]) + htext += "No. of Runs: {1}
" \ + "Mean: {0:.2f}Mpps
".format(val["val"][idx], + val["count"][idx]) if isinstance(val["diff"][idx], float): htext += "Diff: {0:.0f}%
".format(round(val["diff"][idx])) if isinstance(val["rel"][idx], float): @@ -804,21 +805,21 @@ def plot_http_server_performance_box(plot, input_data): df = pd.DataFrame(y_vals) df.head() for i, col in enumerate(df.columns): - name = "{0}. {1}".format(i + 1, col.lower().replace('-ndrpdr', '')) - if len(name) > 60: + name = "{nr}. ({samples:02d} run{plural}) {name}".\ + format(nr=(i + 1), + samples=nr_of_samples[i], + plural='s' if nr_of_samples[i] > 1 else '', + name=col.lower().replace('-ndrpdr', '')) + if len(name) > 50: name_lst = name.split('-') name = "" split_name = True for segment in name_lst: - if (len(name) + len(segment) + 1) > 60 and split_name: + if (len(name) + len(segment) + 1) > 50 and split_name: name += "
" split_name = False name += segment + '-' name = name[:-1] - name = "{name} ({samples} run{plural})".\ - format(name=name, - samples=nr_of_samples[i], - plural='s' if nr_of_samples[i] > 1 else '') traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), y=df[col], diff --git a/resources/tools/presentation/generator_report.py b/resources/tools/presentation/generator_report.py index 0fff90a204..6b28a92908 100644 --- a/resources/tools/presentation/generator_report.py +++ b/resources/tools/presentation/generator_report.py @@ -91,7 +91,7 @@ HTML_BUILDER = 'sphinx-build -v -c . -a ' \ '-b html -E ' \ '-t html ' \ '-D release={release} ' \ - '-D version="Report v{report_version} - {date}" ' \ + '-D version="Test Report {date}" ' \ '{working_dir} ' \ '{build_dir}/' @@ -100,7 +100,7 @@ PDF_BUILDER = 'sphinx-build -v -c . -a ' \ '-b latex -E ' \ '-t latex ' \ '-D release={release} ' \ - '-D version="Report v{report_version} - {date}" ' \ + '-D version="Test Report {date}" ' \ '{working_dir} ' \ '{build_dir}' @@ -157,7 +157,7 @@ def generate_html_report(release, spec, versions, report_version): cmd = HTML_BUILDER.format( release=release, report_version=report_version, - date=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'), + date=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'), working_dir=working_dir, build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"]) execute_command(cmd) @@ -212,7 +212,7 @@ def generate_pdf_report(release, spec, versions, report_version): cmd = PDF_BUILDER.format( release=release, report_version=report_version, - date=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'), + date=datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'), working_dir=working_dir, build_dir=build_dir) execute_command(cmd) diff --git a/resources/tools/presentation/run_report.sh b/resources/tools/presentation/run_report.sh index 6d770810c5..0438248b8f 100755 --- a/resources/tools/presentation/run_report.sh +++ b/resources/tools/presentation/run_report.sh @@ -38,7 +38,7 @@ export PYTHONPATH=`pwd` python pal.py \ --specification specification.yaml \ --release ${RELEASE} \ - --version "1.2" \ + --version "w47" \ --logging INFO \ --force -- cgit 1.2.3-korg