author    Tibor Frank <tifrank@cisco.com>    2019-08-02 08:52:26 +0200
committer Tibor Frank <tifrank@cisco.com>    2019-08-02 06:55:14 +0000
commit    7b1f19c0c338f5d8eb65fb45b57bb5de84398762 (patch)
tree      5bd46749b03e6e2693cb3e192900ece7aef19378
parent    d541b2b7d99651b53bd21ff75cd5fdacf8472a98 (diff)
Trending: Remove multiprocessing from cpta
Change-Id: If53c5a72abed11fda9b15d043b24027d2eb9d86e
Signed-off-by: Tibor Frank <tifrank@cisco.com>
-rw-r--r--  resources/tools/presentation/generator_CPTA.py     | 70
-rw-r--r--  resources/tools/presentation/input_data_parser.py  |  3
2 files changed, 17 insertions(+), 56 deletions(-)
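In essence, the patch drops the multiprocessing Worker pool and generates the trending charts sequentially: _generate_chart() now returns its result directly and replays its buffered log records itself, and the caller simply loops over spec.cpta["plots"]. A minimal sketch of that shape follows; the chart generator body and the job name are simplified stand-ins, not code from the patch, and the patch itself replays the log levels with an explicit if/elif chain (getattr is used here only to keep the sketch short):

    import logging

    def _generate_chart(graph):
        # Stand-in for the real generator in generator_CPTA.py: it returns
        # its result directly instead of putting it on a multiprocessing
        # queue, and forwards its buffered (level, message) log records.
        logs = [("INFO", "Generating the chart: {0}".format(graph))]
        for level, line in logs:
            getattr(logging, level.lower())(line)
        return {"job_name": "trending-job", "csv_table": [], "results": []}

    def _generate_all_charts(plots):
        # Sequential replacement for the Worker/JoinableQueue pattern
        # removed below: generate each chart in-process and consume its
        # result immediately.
        anomaly_classifications = list()
        csv_tables = dict()
        for chart in plots:
            result = _generate_chart(chart)
            anomaly_classifications.extend(result["results"])
            csv_tables.setdefault(result["job_name"], list()).extend(
                result["csv_table"])
        return anomaly_classifications, csv_tables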
diff --git a/resources/tools/presentation/generator_CPTA.py b/resources/tools/presentation/generator_CPTA.py
index a9c4d472ec..4f722574dd 100644
--- a/resources/tools/presentation/generator_CPTA.py
+++ b/resources/tools/presentation/generator_CPTA.py
@@ -14,8 +14,6 @@
"""Generation of Continuous Performance Trending and Analysis.
"""
-import multiprocessing
-import os
import logging
import csv
import prettytable
@@ -27,8 +25,7 @@ from collections import OrderedDict
from datetime import datetime
from copy import deepcopy
-from utils import archive_input_data, execute_command, \
- classify_anomalies, Worker
+from utils import archive_input_data, execute_command, classify_anomalies
# Command to build the html format of the report
@@ -324,7 +321,7 @@ def _generate_all_charts(spec, input_data):
:type input_data: InputData
"""
- def _generate_chart(_, data_q, graph):
+ def _generate_chart(graph):
"""Generates the chart.
"""
@@ -499,13 +496,19 @@ def _generate_all_charts(spec, input_data):
except plerr.PlotlyEmptyDataError:
logs.append(("WARNING", "No data for the plot. Skipped."))
- data_out = {
- "job_name": job_name,
- "csv_table": csv_tbl,
- "results": res,
- "logs": logs
- }
- data_q.put(data_out)
+ for level, line in logs:
+ if level == "INFO":
+ logging.info(line)
+ elif level == "ERROR":
+ logging.error(line)
+ elif level == "DEBUG":
+ logging.debug(line)
+ elif level == "CRITICAL":
+ logging.critical(line)
+ elif level == "WARNING":
+ logging.warning(line)
+
+ return {"job_name": job_name, "csv_table": csv_tbl, "results": res}
builds_dict = dict()
for job in spec.input["builds"].keys():
@@ -534,26 +537,6 @@ def _generate_all_charts(spec, input_data):
testbed
)
- work_queue = multiprocessing.JoinableQueue()
- manager = multiprocessing.Manager()
- data_queue = manager.Queue()
- cpus = multiprocessing.cpu_count()
-
- workers = list()
- for cpu in range(cpus):
- worker = Worker(work_queue,
- data_queue,
- _generate_chart)
- worker.daemon = True
- worker.start()
- workers.append(worker)
- os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
- format(cpu, worker.pid))
-
- for chart in spec.cpta["plots"]:
- work_queue.put((chart, ))
- work_queue.join()
-
anomaly_classifications = list()
# Create the header:
@@ -570,31 +553,12 @@ def _generate_all_charts(spec, input_data):
header = "Version:," + ",".join(versions) + '\n'
csv_tables[job_name].append(header)
- while not data_queue.empty():
- result = data_queue.get()
+ for chart in spec.cpta["plots"]:
+ result = _generate_chart(chart)
anomaly_classifications.extend(result["results"])
csv_tables[result["job_name"]].extend(result["csv_table"])
- for item in result["logs"]:
- if item[0] == "INFO":
- logging.info(item[1])
- elif item[0] == "ERROR":
- logging.error(item[1])
- elif item[0] == "DEBUG":
- logging.debug(item[1])
- elif item[0] == "CRITICAL":
- logging.critical(item[1])
- elif item[0] == "WARNING":
- logging.warning(item[1])
-
- del data_queue
-
- # Terminate all workers
- for worker in workers:
- worker.terminate()
- worker.join()
-
# Write the tables:
for job_name, csv_table in csv_tables.items():
file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
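A side note on the log handling above: _generate_chart() still buffers its messages as (level, message) tuples while plotting, and the patch replays them through an if/elif chain once plotting is done. An equivalent, more compact replay (not what the patch does, shown only to illustrate the design choice) maps the level name back to its numeric value with logging.getLevelName:

    import logging

    def replay_logs(logs):
        # "logs" is a list of (level_name, message) tuples such as
        # ("WARNING", "No data for the plot. Skipped.").
        for level, line in logs:
            # getLevelName() maps e.g. "WARNING" -> 30, so a single
            # logging.log() call covers every level handled by the
            # if/elif chain in the patch.
            logging.log(logging.getLevelName(level), line)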
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 2c248fb3af..3b3be9f1cd 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -19,8 +19,6 @@
- filter the data using tags,
"""
-import multiprocessing
-import os
import re
import resource
import pandas as pd
@@ -38,7 +36,6 @@ from json import loads
from jumpavg.AvgStdevMetadataFactory import AvgStdevMetadataFactory
from input_data_files import download_and_unzip_data_file
-from utils import Worker
# Separator used in file names