author    Tibor Frank <tifrank@cisco.com>    2019-08-01 12:24:08 +0200
committer Tibor Frank <tifrank@cisco.com>    2019-08-01 13:19:47 +0200
commit    cee3ad0f9cc29ffc67d9c87c58920252671beb21 (patch)
tree      a119a9456954a6a1130d5bbfc81fde3c051e3b0a /resources/tools/presentation/input_data_parser.py
parent    2dd27f5a638b5231c0f074ca61e6b67fed9d1faf (diff)
Trending: Add multiprocessing, remove archiving
Change-Id: I67cfde7dfc9b81fca3ae102b43f6defafe88f689
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools/presentation/input_data_parser.py')
-rw-r--r--    resources/tools/presentation/input_data_parser.py    91
1 file changed, 67 insertions(+), 24 deletions(-)
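The change replaces the serial download-and-parse loop with a pool of worker
processes: a multiprocessing.JoinableQueue feeds (job, build, repeat) tuples
to the workers, and a manager-backed queue carries the parsed results back to
the main process. The Worker class itself is defined elsewhere in the repo
and is not part of this diff; the following minimal sketch of its assumed
shape shows how the two queues cooperate:

    import multiprocessing
    import os

    class Worker(multiprocessing.Process):
        """Consume work items from work_queue and run the supplied function,
        which reports its result through data_queue (hypothetical sketch).
        """

        def __init__(self, work_queue, data_queue, func):
            super(Worker, self).__init__()
            self._work_queue = work_queue  # JoinableQueue of pending builds
            self._data_queue = data_queue  # managed Queue for results
            self._func = func              # e.g. _download_and_parse_build

        def run(self):
            while True:
                params = self._work_queue.get()
                try:
                    self._func(os.getpid(), self._data_queue, *params)
                finally:
                    # Always mark the item done so work_queue.join() returns.
                    self._work_queue.task_done()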
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index 37532c83d2..9c0e38073c 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -23,7 +23,6 @@ import multiprocessing
import os
import re
import resource
-import objgraph
import pandas as pd
import logging
@@ -1189,10 +1188,13 @@ class InputData(object):
        return checker.data

-    def _download_and_parse_build(self, job, build, repeat, pid=10000):
+    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
        """Download and parse the input data file.

        :param pid: PID of the process executing this method.
+        :param data_queue: Queue shared among processes; it keeps the result
+            data, which is then read by the main process and used in further
+            processing.
        :param job: Name of the Jenkins job which generated the processed input
            file.
        :param build: Information about the Jenkins build which generated the
@@ -1200,6 +1202,7 @@ class InputData(object):
        :param repeat: Repeat the download specified number of times if not
            successful.
        :type pid: int
+        :type data_queue: multiprocessing.Manager().Queue()
        :type job: str
        :type build: dict
        :type repeat: int
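The ":type data_queue: multiprocessing.Manager().Queue()" annotation is worth
a note: a manager-backed queue is proxied through a separate server process,
so the parent can keep reading from it after the workers are gone. A toy
illustration of the hand-off, with all values invented for the example:

    import multiprocessing

    def producer(pid, data_queue):
        # Mirrors the result dict that _download_and_parse_build now builds.
        data_queue.put({"data": dict(), "state": "processed",
                        "job": "example-job", "build": {"build": 1}})

    if __name__ == "__main__":
        manager = multiprocessing.Manager()
        data_queue = manager.Queue()
        proc = multiprocessing.Process(target=producer, args=(0, data_queue))
        proc.start()
        proc.join()
        print(data_queue.get())  # the dict crossed the process boundary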
@@ -1280,6 +1283,14 @@ class InputData(object):
                    format(full_name, repr(err))))
        logs.append(("INFO", " Done."))

+        result = {
+            "data": data,
+            "state": state,
+            "job": job,
+            "build": build
+        }
+        data_queue.put(result)
+
        for level, line in logs:
            if level == "INFO":
                logging.info(line)
@@ -1292,7 +1303,8 @@ class InputData(object):
            elif level == "WARNING":
                logging.warning(line)

-        return {"data": data, "state": state, "job": job, "build": build}
+        logging.info("Memory allocation: {0:,d}MB".format(
+            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
    def download_and_parse_data(self, repeat=1):
        """Download the input data files, parse input data from input files and
@@ -1305,36 +1317,67 @@ class InputData(object):
logging.info("Downloading and parsing input files ...")
+ work_queue = multiprocessing.JoinableQueue()
+ manager = multiprocessing.Manager()
+ data_queue = manager.Queue()
+ cpus = multiprocessing.cpu_count()
+
+ workers = list()
+ for cpu in range(cpus):
+ worker = Worker(work_queue,
+ data_queue,
+ self._download_and_parse_build)
+ worker.daemon = True
+ worker.start()
+ workers.append(worker)
+ os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
+ format(cpu, worker.pid))
+
for job, builds in self._cfg.builds.items():
for build in builds:
+ work_queue.put((job, build, repeat))
+
+        work_queue.join()
+
+        logging.info("Done.")
+        logging.info("Collecting data:")
+
+        while not data_queue.empty():
+            result = data_queue.get()
+
+            job = result["job"]
+            build_nr = result["build"]["build"]
+            logging.info("  {job}-{build}".format(job=job, build=build_nr))
-                result = self._download_and_parse_build(job, build, repeat)
-                build_nr = result["build"]["build"]
+            if result["data"]:
+                data = result["data"]
+                build_data = pd.Series({
+                    "metadata": pd.Series(
+                        data["metadata"].values(),
+                        index=data["metadata"].keys()),
+                    "suites": pd.Series(data["suites"].values(),
+                                        index=data["suites"].keys()),
+                    "tests": pd.Series(data["tests"].values(),
+                                       index=data["tests"].keys())})
-                if result["data"]:
-                    data = result["data"]
-                    build_data = pd.Series({
-                        "metadata": pd.Series(
-                            data["metadata"].values(),
-                            index=data["metadata"].keys()),
-                        "suites": pd.Series(data["suites"].values(),
-                                            index=data["suites"].keys()),
-                        "tests": pd.Series(data["tests"].values(),
-                                           index=data["tests"].keys())})
+                if self._input_data.get(job, None) is None:
+                    self._input_data[job] = pd.Series()
+                self._input_data[job][str(build_nr)] = build_data
-                    if self._input_data.get(job, None) is None:
-                        self._input_data[job] = pd.Series()
-                    self._input_data[job][str(build_nr)] = build_data
+                self._cfg.set_input_file_name(
+                    job, build_nr, result["build"]["file-name"])
-                    self._cfg.set_input_file_name(
-                        job, build_nr, result["build"]["file-name"])
+            self._cfg.set_input_state(job, build_nr, result["state"])
-                self._cfg.set_input_state(job, build_nr, result["state"])
+            logging.info("Memory allocation: {0:,d}MB".format(
+                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000))
-                logging.info("ru_maxrss = {0}".format(
-                    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
+        del data_queue
-        logging.info(objgraph.most_common_types())
+        # Terminate all workers
+        for worker in workers:
+            worker.terminate()
+            worker.join()

        logging.info("Done.")