path: root/resources/tools/presentation/generator_alerts.py
author     Tibor Frank <tifrank@cisco.com>  2019-11-20 11:43:44 +0100
committer  Tibor Frank <tifrank@cisco.com>  2019-12-03 14:22:39 +0000
commit     cbfa26dc0f5334bcd367c161b4eaad342355bbde (patch)
tree       0e2c9cec7e956f914dcb8a1b1865ff4e3d7a47fd /resources/tools/presentation/generator_alerts.py
parent     375aeaab2c14e45ebe45c35947381dc248b32097 (diff)
Python3: PAL
- files renamed:
  - utils.py --> pal_utils.py
  - errors.py --> pal_errors.py
- functions/methods renamed:
  - plot_service_density_reconf_box_name --> plot_nf_reconf_box_name
  - plot_performance_box_name --> plot_perf_box_name
  - plot_latency_error_bars_name --> plot_lat_err_bars_name
  - plot_throughput_speedup_analysis_name --> plot_tsa_name
  - plot_service_density_heatmap --> plot_nf_heatmap
  - table_performance_comparison --> table_perf_comparison
  - table_performance_comparison_nic --> table_perf_comparison_nic
  - table_performance_trending_dashboard_html --> table_perf_trending_dash_html
- functions/methods removed:
  - plot_service_density_heatmap_compare
  - plot_throughput_speedup_analysis
  - plot_latency_error_bars
  - plot_soak_boxes
  - plot_soak_bars

Change-Id: Icddc01d3ccb451abb92b9e5d912b642d01866033
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools/presentation/generator_alerts.py')
-rw-r--r--  resources/tools/presentation/generator_alerts.py  |  393
1 file changed, 205 insertions(+), 188 deletions(-)
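
The patch below is a mechanical Python 2 to Python 3 port of the PAL alert generator, combined with the module renames listed in the commit message (utils --> pal_utils, errors --> pal_errors). As a quick orientation, here is a minimal, hedged sketch of the conversion patterns that recur throughout the diff; the example data in it is invented for illustration and is not part of the commit:

# Illustration only; mirrors the recurring Python 2 -> Python 3 changes
# applied in the diff below.
#
# Renamed modules introduced by this commit:
#   from pal_utils import get_last_completed_build_number
#   from pal_errors import PresentationError

alerts = {u"alert-1": {u"way": u"jenkins"}}  # hypothetical example data

# dict.iteritems() does not exist in Python 3; use items() / values():
for name, data in alerts.items():
    # str.format() calls are replaced by f-strings:
    old_style = u"Alert '{0}' uses way '{1}'.".format(name, data[u"way"])
    new_style = f"Alert {name} uses way {data[u'way']}."
    assert old_style.replace(u"'", u"") == new_style
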
diff --git a/resources/tools/presentation/generator_alerts.py b/resources/tools/presentation/generator_alerts.py
index 3a9b5ddfb6..10c6734aad 100644
--- a/resources/tools/presentation/generator_alerts.py
+++ b/resources/tools/presentation/generator_alerts.py
@@ -11,6 +11,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""Generator of alerts:
+- failed tests
+- regressions
+- progressions
+"""
+
+
import smtplib
import logging
@@ -19,8 +26,8 @@ from email.mime.multipart import MIMEMultipart
from os.path import isdir
from collections import OrderedDict
-from utils import get_last_completed_build_number
-from errors import PresentationError
+from pal_utils import get_last_completed_build_number
+from pal_errors import PresentationError
class AlertingError(PresentationError):
@@ -34,7 +41,7 @@ class AlertingError(PresentationError):
- relevant data if there are any collected (optional parameter details).
"""
- def __init__(self, msg, details='', level="CRITICAL"):
+ def __init__(self, msg, details=u'', level=u"CRITICAL"):
"""Sets the exception message and the level.
:param msg: Short description of the encountered problem.
@@ -48,13 +55,13 @@ class AlertingError(PresentationError):
:type level: str
"""
- super(AlertingError, self).__init__(
- "Alerting: {0}".format(msg), details, level)
+ super(AlertingError, self).__init__(f"Alerting: {msg}", details, level)
def __repr__(self):
return (
- "AlertingError(msg={msg!r},details={dets!r},level={level!r})".
- format(msg=self._msg, dets=self._details, level=self._level))
+ f"AlertingError(msg={self._msg!r},details={self._details!r},"
+ f"level={self._level!r})"
+ )
class Alerting:
@@ -69,57 +76,58 @@ class Alerting:
"""
# Implemented alerts:
- self._ALERTS = ("failed-tests", )
+ self._implemented_alerts = (u"failed-tests", )
self._spec = spec
try:
self._spec_alert = spec.alerting
except KeyError as err:
- raise AlertingError("Alerting is not configured, skipped.",
- repr(err),
- "WARNING")
+ raise AlertingError(u"Alerting is not configured, skipped.",
+ repr(err),
+ u"WARNING")
- self._path_failed_tests = spec.environment["paths"]["DIR[STATIC,VPP]"]
+ self._path_failed_tests = spec.environment[u"paths"][u"DIR[STATIC,VPP]"]
# Verify and validate input specification:
- self.configs = self._spec_alert.get("configurations", None)
+ self.configs = self._spec_alert.get(u"configurations", None)
if not self.configs:
- raise AlertingError("No alert configuration is specified.")
- for config_type, config_data in self.configs.iteritems():
- if config_type == "email":
- if not config_data.get("server", None):
- raise AlertingError("Parameter 'server' is missing.")
- if not config_data.get("address-to", None):
- raise AlertingError("Parameter 'address-to' (recipient) is "
- "missing.")
- if not config_data.get("address-from", None):
- raise AlertingError("Parameter 'address-from' (sender) is "
- "missing.")
- elif config_type == "jenkins":
- if not isdir(config_data.get("output-dir", "")):
- raise AlertingError("Parameter 'output-dir' is "
- "missing or it is not a directory.")
- if not config_data.get("output-file", None):
- raise AlertingError("Parameter 'output-file' is missing.")
+ raise AlertingError(u"No alert configuration is specified.")
+ for config_type, config_data in self.configs.items():
+ if config_type == u"email":
+ if not config_data.get(u"server", None):
+ raise AlertingError(u"Parameter 'server' is missing.")
+ if not config_data.get(u"address-to", None):
+ raise AlertingError(u"Parameter 'address-to' (recipient) "
+ u"is missing.")
+ if not config_data.get(u"address-from", None):
+ raise AlertingError(u"Parameter 'address-from' (sender) is "
+ u"missing.")
+ elif config_type == u"jenkins":
+ if not isdir(config_data.get(u"output-dir", u"")):
+ raise AlertingError(u"Parameter 'output-dir' is "
+ u"missing or it is not a directory.")
+ if not config_data.get(u"output-file", None):
+ raise AlertingError(u"Parameter 'output-file' is missing.")
else:
- raise AlertingError("Alert of type '{0}' is not implemented.".
- format(config_type))
+ raise AlertingError(
+ f"Alert of type {config_type} is not implemented."
+ )
- self.alerts = self._spec_alert.get("alerts", None)
+ self.alerts = self._spec_alert.get(u"alerts", None)
if not self.alerts:
- raise AlertingError("No alert is specified.")
- for alert, alert_data in self.alerts.iteritems():
- if not alert_data.get("title", None):
- raise AlertingError("Parameter 'title' is missing.")
- if not alert_data.get("type", None) in self._ALERTS:
- raise AlertingError("Parameter 'failed-tests' is missing or "
- "incorrect.")
- if not alert_data.get("way", None) in self.configs.keys():
- raise AlertingError("Parameter 'way' is missing or incorrect.")
- if not alert_data.get("include", None):
- raise AlertingError("Parameter 'include' is missing or the "
- "list is empty.")
+ raise AlertingError(u"No alert is specified.")
+ for alert_data in self.alerts.values():
+ if not alert_data.get(u"title", None):
+ raise AlertingError(u"Parameter 'title' is missing.")
+ if not alert_data.get(u"type", None) in self._implemented_alerts:
+ raise AlertingError(u"Parameter 'failed-tests' is missing or "
+ u"incorrect.")
+ if not alert_data.get(u"way", None) in self.configs.keys():
+ raise AlertingError(u"Parameter 'way' is missing or incorrect.")
+ if not alert_data.get(u"include", None):
+ raise AlertingError(u"Parameter 'include' is missing or the "
+ u"list is empty.")
def __str__(self):
"""Return string with human readable description of the alert.
@@ -127,8 +135,7 @@ class Alerting:
:returns: Readable description.
:rtype: str
"""
- return "configs={configs}, alerts={alerts}".format(
- configs=self.configs, alerts=self.alerts)
+ return f"configs={self.configs}, alerts={self.alerts}"
def __repr__(self):
"""Return string executable as Python constructor call.
@@ -136,19 +143,19 @@ class Alerting:
:returns: Executable constructor call.
:rtype: str
"""
- return "Alerting(spec={spec})".format(
- spec=self._spec)
+ return f"Alerting(spec={self._spec})"
def generate_alerts(self):
"""Generate alert(s) using specified way(s).
"""
- for alert, alert_data in self.alerts.iteritems():
- if alert_data["way"] == "jenkins":
+ for alert_data in self.alerts.values():
+ if alert_data[u"way"] == u"jenkins":
self._generate_email_body(alert_data)
else:
- raise AlertingError("Alert with way '{0}' is not implemented.".
- format(alert_data["way"]))
+ raise AlertingError(
+ f"Alert with way {alert_data[u'way']} is not implemented."
+ )
@staticmethod
def _send_email(server, addr_from, addr_to, subject, text=None, html=None):
@@ -169,29 +176,29 @@ class Alerting:
"""
if not text and not html:
- raise AlertingError("No text/data to send.")
+ raise AlertingError(u"No text/data to send.")
- msg = MIMEMultipart('alternative')
- msg['Subject'] = subject
- msg['From'] = addr_from
- msg['To'] = ", ".join(addr_to)
+ msg = MIMEMultipart(u'alternative')
+ msg[u'Subject'] = subject
+ msg[u'From'] = addr_from
+ msg[u'To'] = u", ".join(addr_to)
if text:
- msg.attach(MIMEText(text, 'plain'))
+ msg.attach(MIMEText(text, u'plain'))
if html:
- msg.attach(MIMEText(html, 'html'))
+ msg.attach(MIMEText(html, u'html'))
smtp_server = None
try:
- logging.info("Trying to send alert '{0}' ...".format(subject))
- logging.debug("SMTP Server: {0}".format(server))
- logging.debug("From: {0}".format(addr_from))
- logging.debug("To: {0}".format(", ".join(addr_to)))
- logging.debug("Message: {0}".format(msg.as_string()))
+ logging.info(f"Trying to send alert {subject} ...")
+ logging.debug(f"SMTP Server: {server}")
+ logging.debug(f"From: {addr_from}")
+ logging.debug(f"To: {u', '.join(addr_to)}")
+ logging.debug(f"Message: {msg.as_string()}")
smtp_server = smtplib.SMTP(server)
smtp_server.sendmail(addr_from, addr_to, msg.as_string())
except smtplib.SMTPException as err:
- raise AlertingError("Not possible to send the alert via email.",
+ raise AlertingError(u"Not possible to send the alert via email.",
str(err))
finally:
if smtp_server:
@@ -242,12 +249,12 @@ class Alerting:
:rtype: tuple(str, str, int, int, OrderedDict)
"""
- directory = self.configs[alert["way"]]["output-dir"]
+ directory = self.configs[alert[u"way"]][u"output-dir"]
failed_tests = OrderedDict()
- file_path = "{0}/{1}.txt".format(directory, test_set)
- version = ""
+ file_path = f"{directory}/{test_set}.txt"
+ version = u""
try:
- with open(file_path, 'r') as f_txt:
+ with open(file_path, u'r') as f_txt:
for idx, line in enumerate(f_txt):
if idx == 0:
build = line[:-1]
@@ -262,36 +269,69 @@ class Alerting:
failed = line[:-1]
continue
try:
- test = line[:-1].split('-')
- nic = test[0]
- framesize = test[1]
- cores = test[2]
- name = '-'.join(test[3:-1])
+ test = line[:-1].split(u'-')
+ name = u'-'.join(test[3:-1])
except IndexError:
continue
if failed_tests.get(name, None) is None:
failed_tests[name] = dict(nics=list(),
framesizes=list(),
cores=list())
- if nic not in failed_tests[name]["nics"]:
- failed_tests[name]["nics"].append(nic)
- if framesize not in failed_tests[name]["framesizes"]:
- failed_tests[name]["framesizes"].append(framesize)
- if cores not in failed_tests[name]["cores"]:
- failed_tests[name]["cores"].append(cores)
+ if test[0] not in failed_tests[name][u"nics"]:
+ failed_tests[name][u"nics"].append(test[0])
+ if test[1] not in failed_tests[name][u"framesizes"]:
+ failed_tests[name][u"framesizes"].append(test[1])
+ if test[2] not in failed_tests[name][u"cores"]:
+ failed_tests[name][u"cores"].append(test[2])
except IOError:
- logging.error("No such file or directory: {file}".
- format(file=file_path))
+ logging.error(f"No such file or directory: {file_path}")
return None, None, None, None, None
if sort:
sorted_failed_tests = OrderedDict()
- keys = [k for k in failed_tests.keys()]
- keys.sort()
- for key in keys:
+ for key in sorted(failed_tests.keys()):
sorted_failed_tests[key] = failed_tests[key]
return build, version, passed, failed, sorted_failed_tests
- else:
- return build, version, passed, failed, failed_tests
+
+ return build, version, passed, failed, failed_tests
+
+ def _list_gressions(self, alert, idx, header, re_pro):
+ """Create a file with regressions or progressions for the test set
+ specified by idx.
+
+ :param alert: Files are created for this alert.
+ :param idx: Index of the test set as it is specified in the
+ specification file.
+ :param header: The header of the list of [re|pro]gressions.
+ :param re_pro: Either u'regressions' or u'progressions'.
+ :type alert: dict
+ :type idx: int
+ :type header: str
+ :type re_pro: str
+ """
+
+ if re_pro not in (u"regressions", u"progressions"):
+ return
+
+ in_file = (
+ f"{self.configs[alert[u'way']][u'output-dir']}/"
+ f"cpta-{re_pro}-{alert[u'urls'][idx].split(u'/')[-1]}.txt"
+ )
+ out_file = (
+ f"{self.configs[alert[u'way']][u'output-dir']}/"
+ f"trending-{re_pro}.txt"
+ )
+
+ try:
+ with open(in_file, u'r') as txt_file:
+ file_content = txt_file.read()
+ with open(out_file, u'a+') as reg_file:
+ reg_file.write(header)
+ if file_content:
+ reg_file.write(file_content)
+ else:
+ reg_file.write(f"No {re_pro}")
+ except IOError as err:
+ logging.warning(repr(err))
def _generate_email_body(self, alert):
"""Create the file which is used in the generated alert.
@@ -300,120 +340,97 @@ class Alerting:
:type alert: dict
"""
- if alert["type"] != "failed-tests":
- raise AlertingError("Alert of type '{0}' is not implemented.".
- format(alert["type"]))
+ if alert[u"type"] != u"failed-tests":
+ raise AlertingError(
+ f"Alert of type {alert[u'type']} is not implemented."
+ )
- config = self.configs[alert["way"]]
-
- text = ""
- for idx, test_set in enumerate(alert.get("include", [])):
+ text = u""
+ for idx, test_set in enumerate(alert.get(u"include", [])):
build, version, passed, failed, failed_tests = \
self._get_compressed_failed_tests(alert, test_set)
if build is None:
ret_code, build_nr, _ = get_last_completed_build_number(
- self._spec.environment["urls"]["URL[JENKINS,CSIT]"],
- alert["urls"][idx].split('/')[-1])
+ self._spec.environment[u"urls"][u"URL[JENKINS,CSIT]"],
+ alert[u"urls"][idx].split(u'/')[-1])
if ret_code != 0:
- build_nr = ''
- text += "\n\nNo input data available for '{set}'. See CSIT " \
- "build {link}/{build} for more information.\n".\
- format(set='-'.join(test_set.split('-')[-2:]),
- link=alert["urls"][idx],
- build=build_nr)
+ build_nr = u''
+ text += (
+ f"\n\nNo input data available for "
+ f"{u'-'.join(test_set.split('-')[-2:])}. See CSIT build "
+ f"{alert[u'urls'][idx]}/{build_nr} for more information.\n"
+ )
continue
- text += ("\n\n{topo}-{arch}, "
- "{failed} tests failed, "
- "{passed} tests passed, "
- "CSIT build: {link}/{build}, "
- "VPP version: {version}\n\n".
- format(topo=test_set.split('-')[-2],
- arch=test_set.split('-')[-1],
- failed=failed,
- passed=passed,
- link=alert["urls"][idx],
- build=build,
- version=version))
- regression_hdr = ("\n\n{topo}-{arch}, "
- "CSIT build: {link}/{build}, "
- "VPP version: {version}\n\n"
- .format(topo=test_set.split('-')[-2],
- arch=test_set.split('-')[-1],
- link=alert["urls"][idx],
- build=build,
- version=version
- ))
- max_len_name = 0
- max_len_nics = 0
- max_len_framesizes = 0
- max_len_cores = 0
- for name, params in failed_tests.items():
- failed_tests[name]["nics"] = ",".join(sorted(params["nics"]))
- failed_tests[name]["framesizes"] = \
- ",".join(sorted(params["framesizes"]))
- failed_tests[name]["cores"] = ",".join(sorted(params["cores"]))
- if len(name) > max_len_name:
- max_len_name = len(name)
- if len(failed_tests[name]["nics"]) > max_len_nics:
- max_len_nics = len(failed_tests[name]["nics"])
- if len(failed_tests[name]["framesizes"]) > max_len_framesizes:
- max_len_framesizes = len(failed_tests[name]["framesizes"])
- if len(failed_tests[name]["cores"]) > max_len_cores:
- max_len_cores = len(failed_tests[name]["cores"])
+ text += (
+ f"\n\n{test_set.split('-')[-2]}-{test_set.split('-')[-1]}, "
+ f"{failed} tests failed, "
+ f"{passed} tests passed, CSIT build: "
+ f"{alert[u'urls'][idx]}/{build}, VPP version: {version}\n\n"
+ )
+
+ class MaxLens():
+ """Class to store the max lengths of strings displayed in
+ failed tests list.
+ """
+ def __init__(self, tst_name, nics, framesizes, cores):
+ """Initialisation.
+
+ :param tst_name: Name of the test.
+ :param nics: NICs used in the test.
+ :param framesizes: Frame sizes used in the tests.
+ :param cores: Cores used in the test.
+ """
+ self.name = tst_name
+ self.nics = nics
+ self.frmsizes = framesizes
+ self.cores = cores
+
+ max_len = MaxLens(0, 0, 0, 0)
for name, params in failed_tests.items():
- text += "{name} {nics} {frames} {cores}\n".format(
- name=name + " " * (max_len_name - len(name)),
- nics=params["nics"] +
- " " * (max_len_nics - len(params["nics"])),
- frames=params["framesizes"] + " " *
- (max_len_framesizes - len(params["framesizes"])),
- cores=params["cores"] +
- " " * (max_len_cores - len(params["cores"])))
+ failed_tests[name][u"nics"] = u",".join(sorted(params[u"nics"]))
+ failed_tests[name][u"framesizes"] = \
+ u",".join(sorted(params[u"framesizes"]))
+ failed_tests[name][u"cores"] = \
+ u",".join(sorted(params[u"cores"]))
+ if len(name) > max_len.name:
+ max_len.name = len(name)
+ if len(failed_tests[name][u"nics"]) > max_len.nics:
+ max_len.nics = len(failed_tests[name][u"nics"])
+ if len(failed_tests[name][u"framesizes"]) > max_len.frmsizes:
+ max_len.frmsizes = len(failed_tests[name][u"framesizes"])
+ if len(failed_tests[name][u"cores"]) > max_len.cores:
+ max_len.cores = len(failed_tests[name][u"cores"])
+ for name, params in failed_tests.items():
+ text += (
+ f"{name + u' ' * (max_len.name - len(name))} "
+ f"{params[u'nics']}"
+ f"{u' ' * (max_len.nics - len(params[u'nics']))} "
+ f"{params[u'framesizes']}"
+ f"{u' ' * (max_len.frmsizes-len(params[u'framesizes']))} "
+ f"{params[u'cores']}"
+ f"{u' ' * (max_len.cores - len(params[u'cores']))}\n"
+ )
+
+ gression_hdr = (
+ f"\n\n{test_set.split(u'-')[-2]}-{test_set.split(u'-')[-1]}, "
+ f"CSIT build: {alert[u'urls'][idx]}/{build}, "
+ f"VPP version: {version}\n\n"
+ )
# Add list of regressions:
- file_name = "{0}/cpta-regressions-{1}.txt".\
- format(config["output-dir"], alert["urls"][idx].split('/')[-1])
- try:
- with open(file_name, 'r') as txt_file:
- file_content = txt_file.read()
- reg_file_name = "{dir}/trending-regressions.txt". \
- format(dir=config["output-dir"])
- with open(reg_file_name, 'a+') as reg_file:
- reg_file.write(regression_hdr)
- if file_content:
- reg_file.write(file_content)
- else:
- reg_file.write("No regressions")
- except IOError as err:
- logging.warning(repr(err))
+ self._list_gressions(alert, idx, gression_hdr, u"regressions")
# Add list of progressions:
- file_name = "{0}/cpta-progressions-{1}.txt".\
- format(config["output-dir"], alert["urls"][idx].split('/')[-1])
- try:
- with open(file_name, 'r') as txt_file:
- file_content = txt_file.read()
- pro_file_name = "{dir}/trending-progressions.txt". \
- format(dir=config["output-dir"])
- with open(pro_file_name, 'a+') as pro_file:
- pro_file.write(regression_hdr)
- if file_content:
- pro_file.write(file_content)
- else:
- pro_file.write("No progressions")
- except IOError as err:
- logging.warning(repr(err))
-
- text += "\nFor detailed information visit: {url}\n".\
- format(url=alert["url-details"])
- file_name = "{0}/{1}".format(config["output-dir"],
- config["output-file"])
- logging.info("Writing the file '{0}.txt' ...".format(file_name))
+ self._list_gressions(alert, idx, gression_hdr, u"progressions")
+
+ text += f"\nFor detailed information visit: {alert[u'url-details']}\n"
+ file_name = f"{self.configs[alert[u'way']][u'output-dir']}/" \
+ f"{self.configs[alert[u'way']][u'output-file']}"
+ logging.info(f"Writing the file {file_name}.txt ...")
try:
- with open("{0}.txt".format(file_name), 'w') as txt_file:
+ with open(f"{file_name}.txt", u'w') as txt_file:
txt_file.write(text)
except IOError:
- logging.error("Not possible to write the file '{0}.txt'.".
- format(file_name))
+ logging.error(f"Not possible to write the file {file_name}.txt.")