Diffstat (limited to 'resources/tools/presentation/generator_plots.py')
-rw-r--r--  resources/tools/presentation/generator_plots.py  188
1 file changed, 110 insertions(+), 78 deletions(-)
diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py
index 628ea534ee..0f660999dd 100644
--- a/resources/tools/presentation/generator_plots.py
+++ b/resources/tools/presentation/generator_plots.py
@@ -129,9 +129,11 @@ def plot_performance_box(plot, input_data):
# Add None to the lists with missing data
max_len = 0
+ nr_of_samples = list()
for val in y_sorted.values():
if len(val) > max_len:
max_len = len(val)
+ nr_of_samples.append(len(val))
for key, val in y_sorted.items():
if len(val) < max_len:
val.extend([None for _ in range(max_len - len(val))])
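This hunk records each test's real sample count before the value lists are padded with None for the pandas DataFrame, so the fillers do not inflate the counts. A minimal standalone sketch of the same count-then-pad pattern, with invented sample data:

    import pandas as pd

    # Hypothetical per-test throughput samples (Mpps) of unequal length.
    y_sorted = {
        "64B-1t1c-eth-l2patch-ndrpdr": [11.2, 11.3, 11.1],
        "64B-1t1c-eth-l2xcbase-ndrpdr": [10.4],
    }

    # Record real sample counts *before* padding.
    nr_of_samples = [len(val) for val in y_sorted.values()]
    max_len = max(nr_of_samples)

    # Pad the shorter lists with None so every column has equal length.
    for val in y_sorted.values():
        val.extend([None] * (max_len - len(val)))

    df = pd.DataFrame(y_sorted)   # None entries become NaN
    print(nr_of_samples)          # -> [3, 1]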
@@ -142,8 +144,11 @@ def plot_performance_box(plot, input_data):
df.head()
y_max = list()
for i, col in enumerate(df.columns):
- name = "{0}. {1}".format(i + 1, col.lower().replace('-ndrpdrdisc', '').
- replace('-ndrpdr', ''))
+ name = "{0}. {1} ({2} run{3})".\
+ format(i + 1,
+ col.lower().replace('-ndrpdr', ''),
+ nr_of_samples[i],
+ 's' if nr_of_samples[i] > 1 else '')
logging.debug(name)
traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]),
y=[y / 1000000 if y else None for y in df[col]],
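The box-trace name now carries the run count with a plural suffix. The same formatting in isolation, using a hypothetical column name and count:

    i = 0
    col = "64B-1t1c-eth-l2patch-NDRPDR"   # hypothetical column name
    nr_of_samples = [10]

    name = "{0}. {1} ({2} run{3})".format(
        i + 1,
        col.lower().replace('-ndrpdr', ''),
        nr_of_samples[i],
        's' if nr_of_samples[i] > 1 else '')
    print(name)   # -> "1. 64b-1t1c-eth-l2patch (10 runs)"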
@@ -248,6 +253,7 @@ def plot_latency_error_bars(plot, input_data):
except (KeyError, TypeError) as err:
logging.warning(repr(err))
logging.debug("y_tmp_vals: {0}\n".format(y_tmp_vals))
+
# Sort the tests
order = plot.get("sort", None)
if order and y_tags:
@@ -279,21 +285,25 @@ def plot_latency_error_bars(plot, input_data):
y_vals = list()
y_mins = list()
y_maxs = list()
+ nr_of_samples = list()
for key, val in y_sorted.items():
key = "-".join(key.split("-")[1:-1])
x_vals.append(key) # dir 1
y_vals.append(mean(val[1]) if val[1] else None)
y_mins.append(mean(val[0]) if val[0] else None)
y_maxs.append(mean(val[2]) if val[2] else None)
+ nr_of_samples.append(len(val[1]) if val[1] else 0)
x_vals.append(key) # dir 2
y_vals.append(mean(val[4]) if val[4] else None)
y_mins.append(mean(val[3]) if val[3] else None)
y_maxs.append(mean(val[5]) if val[5] else None)
+ nr_of_samples.append(len(val[3]) if val[3] else 0)
logging.debug("x_vals :{0}\n".format(x_vals))
logging.debug("y_vals :{0}\n".format(y_vals))
logging.debug("y_mins :{0}\n".format(y_mins))
logging.debug("y_maxs :{0}\n".format(y_maxs))
+ logging.debug("nr_of_samples :{0}\n".format(nr_of_samples))
traces = list()
annotations = list()
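Each test contributes two x-axis entries, one per traffic direction, and the run count is recorded alongside the plotted means. A condensed sketch with invented latency lists (note the hunk itself counts direction 2's samples from the min list, val[3]):

    def mean(values):
        return float(sum(values)) / len(values)

    # Each value holds six lists: dir-1 (min, avg, max), dir-2 (min, avg, max).
    y_sorted = {
        "vpp-64b-1t1c-eth-l2xcbase-ndrpdr": ([8, 9], [12, 13], [20, 22],
                                             [7, 8], [11, 12], [19, 21]),
    }

    x_vals, y_vals, y_mins, y_maxs, nr_of_samples = [], [], [], [], []
    for key, val in y_sorted.items():
        key = "-".join(key.split("-")[1:-1])
        for lo, avg, hi in (val[0:3], val[3:6]):
            x_vals.append(key)                  # one entry per direction
            y_vals.append(mean(avg) if avg else None)
            y_mins.append(mean(lo) if lo else None)
            y_maxs.append(mean(hi) if hi else None)
            nr_of_samples.append(len(avg) if avg else 0)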
@@ -303,8 +313,10 @@ def plot_latency_error_bars(plot, input_data):
else:
direction = "East - West"
hovertext = ("Test: {test}<br>"
- "Direction: {dir}<br>".format(test=x_vals[idx],
- dir=direction))
+ "Direction: {dir}<br>"
+ "No. of Runs: {nr}<br>".format(test=x_vals[idx],
+ dir=direction,
+ nr=nr_of_samples[idx]))
if isinstance(y_maxs[idx], float):
hovertext += "Max: {max:.2f}uSec<br>".format(max=y_maxs[idx])
if isinstance(y_vals[idx], float):
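The hover label is built up field by field, so statistics that are None are simply skipped rather than rendered. A sketch of the pattern; the direction-selection condition and the Mean label are assumptions, since the hunk truncates around them:

    x_vals = ["64b-1t1c-eth-l2xcbase"]
    y_vals = [12.5]
    y_maxs = [21.0]
    nr_of_samples = [10]
    idx = 0

    direction = "East - West"   # selection condition truncated in the hunk
    hovertext = ("Test: {test}<br>"
                 "Direction: {dir}<br>"
                 "No. of Runs: {nr}<br>".format(test=x_vals[idx],
                                                dir=direction,
                                                nr=nr_of_samples[idx]))
    # Statistics are appended only when present as real numbers, so
    # missing (None) values never corrupt the label.
    if isinstance(y_maxs[idx], float):
        hovertext += "Max: {max:.2f}uSec<br>".format(max=y_maxs[idx])
    if isinstance(y_vals[idx], float):
        hovertext += "Mean: {avg:.2f}uSec<br>".format(avg=y_vals[idx])  # label assumed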
@@ -441,7 +453,7 @@ def plot_throughput_speedup_analysis(plot, input_data):
for key, test_val in test_vals.items():
if test_val:
avg_val = sum(test_val) / len(test_val)
- y_vals[test_name][key] = avg_val
+ y_vals[test_name][key] = (avg_val, len(test_val))
ideal = avg_val / (int(key) * 1000000.0)
if test_name not in y_1c_max or ideal > y_1c_max[test_name]:
y_1c_max[test_name] = ideal
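Each per-core entry is widened from a bare average to an (average, run count) tuple so the count can surface later in hover text. A toy illustration of the reshaped structure (the empty-list fallback is an assumption):

    test_vals = {"1": [4.9e6, 5.1e6], "2": [9.8e6, 10.2e6], "4": []}

    y_vals = {"testA": dict()}
    for key, samples in test_vals.items():
        if samples:
            avg_val = sum(samples) / len(samples)
            # Keep the run count alongside the average.
            y_vals["testA"][key] = (avg_val, len(samples))
        else:
            y_vals["testA"][key] = (None, 0)   # assumed fallback for empty data

    print(y_vals["testA"]["1"])   # -> (5000000.0, 2)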
@@ -452,38 +464,47 @@ def plot_throughput_speedup_analysis(plot, input_data):
lnk_limit = 0
pci_limit = plot["limits"]["pci"]["pci-g3-x8"]
for test_name, test_vals in y_vals.items():
- if test_vals["1"]:
- name = "-".join(test_name.split('-')[1:-1])
-
- vals[name] = dict()
- y_val_1 = test_vals["1"] / 1000000.0
- y_val_2 = test_vals["2"] / 1000000.0 if test_vals["2"] else None
- y_val_4 = test_vals["4"] / 1000000.0 if test_vals["4"] else None
-
- vals[name]["val"] = [y_val_1, y_val_2, y_val_4]
- vals[name]["rel"] = [1.0, None, None]
- vals[name]["ideal"] = [y_1c_max[test_name],
- y_1c_max[test_name] * 2,
- y_1c_max[test_name] * 4]
- vals[name]["diff"] = \
- [(y_val_1 - y_1c_max[test_name]) * 100 / y_val_1, None, None]
-
- try:
- val_max = max(max(vals[name]["val"], vals[name]["ideal"]))
- except ValueError as err:
- logging.error(err)
- continue
- if val_max:
- y_max.append(int((val_max / 10) + 1) * 10)
-
- if y_val_2:
- vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2)
- vals[name]["diff"][1] = \
- (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2
- if y_val_4:
- vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2)
- vals[name]["diff"][2] = \
- (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4
+ try:
+ if test_vals["1"][1]:
+ name = "-".join(test_name.split('-')[1:-1])
+
+ vals[name] = dict()
+ y_val_1 = test_vals["1"][0] / 1000000.0
+ y_val_2 = test_vals["2"][0] / 1000000.0 if test_vals["2"][0] \
+ else None
+ y_val_4 = test_vals["4"][0] / 1000000.0 if test_vals["4"][0] \
+ else None
+
+ vals[name]["val"] = [y_val_1, y_val_2, y_val_4]
+ vals[name]["rel"] = [1.0, None, None]
+ vals[name]["ideal"] = [y_1c_max[test_name],
+ y_1c_max[test_name] * 2,
+ y_1c_max[test_name] * 4]
+ vals[name]["diff"] = [(y_val_1 - y_1c_max[test_name]) * 100 /
+ y_val_1, None, None]
+ vals[name]["count"] = [test_vals["1"][1],
+ test_vals["2"][1],
+ test_vals["4"][1]]
+
+ try:
+ val_max = max(max(vals[name]["val"], vals[name]["ideal"]))
+ except ValueError as err:
+ logging.error(err)
+ continue
+ if val_max:
+ y_max.append(int((val_max / 10) + 1) * 10)
+
+ if y_val_2:
+ vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2)
+ vals[name]["diff"][1] = \
+ (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2
+ if y_val_4:
+ vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2)
+ vals[name]["diff"][2] = \
+ (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4
+ except IndexError as err:
+ logging.warning("No data for '{0}'".format(test_name))
+ logging.warning(repr(err))
# Limits:
if "x520" in test_name:
@@ -639,45 +660,51 @@ def plot_throughput_speedup_analysis(plot, input_data):
cidx = 0
for name, val in y_sorted.iteritems():
hovertext = list()
- for idx in range(len(val["val"])):
- htext = ""
- if isinstance(val["val"][idx], float):
- htext += "value: {0:.2f}Mpps<br>".format(val["val"][idx])
- if isinstance(val["diff"][idx], float):
- htext += "diff: {0:.0f}%<br>".format(round(val["diff"][idx]))
- if isinstance(val["rel"][idx], float):
- htext += "speedup: {0:.2f}".format(val["rel"][idx])
- hovertext.append(htext)
- traces.append(plgo.Scatter(x=x_vals,
- y=val["val"],
- name=name,
- legendgroup=name,
- mode="lines+markers",
- line=dict(
- color=COLORS[cidx],
- width=2),
- marker=dict(
- symbol="circle",
- size=10
- ),
- text=hovertext,
- hoverinfo="text+name"
- ))
- traces.append(plgo.Scatter(x=x_vals,
- y=val["ideal"],
- name="{0} perfect".format(name),
- legendgroup=name,
- showlegend=False,
- mode="lines",
- line=dict(
- color=COLORS[cidx],
- width=2,
- dash="dash"),
- text=["perfect: {0:.2f}Mpps".format(y)
- for y in val["ideal"]],
- hoverinfo="text"
- ))
- cidx += 1
+ try:
+ for idx in range(len(val["val"])):
+ htext = ""
+ if isinstance(val["val"][idx], float):
+ htext += "Value: {0:.2f}Mpps<br>" \
+ "No. of Runs: {1}<br>".format(val["val"][idx],
+ val["count"][idx])
+ if isinstance(val["diff"][idx], float):
+ htext += "Diff: {0:.0f}%<br>".format(round(val["diff"][idx]))
+ if isinstance(val["rel"][idx], float):
+ htext += "Speedup: {0:.2f}".format(val["rel"][idx])
+ hovertext.append(htext)
+ traces.append(plgo.Scatter(x=x_vals,
+ y=val["val"],
+ name=name,
+ legendgroup=name,
+ mode="lines+markers",
+ line=dict(
+ color=COLORS[cidx],
+ width=2),
+ marker=dict(
+ symbol="circle",
+ size=10
+ ),
+ text=hovertext,
+ hoverinfo="text+name"
+ ))
+ traces.append(plgo.Scatter(x=x_vals,
+ y=val["ideal"],
+ name="{0} perfect".format(name),
+ legendgroup=name,
+ showlegend=False,
+ mode="lines",
+ line=dict(
+ color=COLORS[cidx],
+ width=2,
+ dash="dash"),
+ text=["perfect: {0:.2f}Mpps".format(y)
+ for y in val["ideal"]],
+ hoverinfo="text"
+ ))
+ cidx += 1
+ except (IndexError, ValueError, KeyError) as err:
+ logging.warning("No data for '{0}'".format(name))
+ logging.warning(repr(err))
try:
# Create plot
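Each test is drawn as a pair of traces sharing one legendgroup: the solid measured line and a dashed "perfect" line with its own legend entry hidden, so toggling the test in the legend hides both. A minimal sketch of the pairing with made-up values:

    import plotly.graph_objs as plgo

    x_vals = [1, 2, 4]
    name = "eth-l2xcbase"          # hypothetical test name
    measured = [5.0, 9.6, 18.4]
    ideal = [5.2, 10.4, 20.8]

    traces = [
        plgo.Scatter(x=x_vals, y=measured, name=name,
                     legendgroup=name, mode="lines+markers"),
        # Dashed ideal line: same legend group, no legend entry of its own.
        plgo.Scatter(x=x_vals, y=ideal,
                     name="{0} perfect".format(name),
                     legendgroup=name, showlegend=False,
                     mode="lines", line=dict(dash="dash"),
                     hoverinfo="text",
                     text=["perfect: {0:.2f}Mpps".format(y) for y in ideal]),
    ]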
@@ -733,9 +760,11 @@ def plot_http_server_performance_box(plot, input_data):
# Add None to the lists with missing data
max_len = 0
+ nr_of_samples = list()
for val in y_vals.values():
if len(val) > max_len:
max_len = len(val)
+ nr_of_samples.append(len(val))
for key, val in y_vals.items():
if len(val) < max_len:
val.extend([None for _ in range(max_len - len(val))])
@@ -745,8 +774,11 @@ def plot_http_server_performance_box(plot, input_data):
df = pd.DataFrame(y_vals)
df.head()
for i, col in enumerate(df.columns):
- name = "{0}. {1}".format(i + 1, col.lower().replace('-cps', '').
- replace('-rps', ''))
+ name = "{0}. {1} ({2} run{3})".\
+ format(i + 1,
+ col.lower().replace('-cps', '').replace('-rps', ''),
+ nr_of_samples[i],
+ 's' if nr_of_samples[i] > 1 else '')
traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]),
y=df[col],
name=name,
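The HTTP-server box plots get the same run-count naming; here the -cps/-rps suffix is stripped instead of -ndrpdr. A standalone sketch of one such Box trace, with invented connection-rate samples:

    import pandas as pd
    import plotly.graph_objs as plgo

    y_vals = {"tcp-cps": [2.1e4, 2.2e4, 2.0e4]}      # invented samples
    nr_of_samples = [len(v) for v in y_vals.values()]
    df = pd.DataFrame(y_vals)

    traces = []
    for i, col in enumerate(df.columns):
        name = "{0}. {1} ({2} run{3})".format(
            i + 1,
            col.lower().replace('-cps', '').replace('-rps', ''),
            nr_of_samples[i],
            's' if nr_of_samples[i] > 1 else '')
        # One box per test column; the x list groups all samples together.
        traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]),
                               y=df[col],
                               name=name))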