author | Tibor Frank <tifrank@cisco.com> | 2019-07-31 12:46:48 +0200 |
---|---|---|
committer | Tibor Frank <tifrank@cisco.com> | 2019-07-31 12:46:48 +0200 |
commit | 20ff879c6dd74b94259998bd252118e8589789d2 (patch) | |
tree | 0417f5b0f280b87f8c987e01924574ace33b558c /resources/tools | |
parent | 85d2e9e114210705c5ea79484692a679ebd4dbea (diff) | |
Trending: Remove multiprocessing, part 1
Change-Id: Idcca9b3b933d5524d2ba5905050480796d8a81aa
Signed-off-by: Tibor Frank <tifrank@cisco.com>
Diffstat (limited to 'resources/tools')
-rw-r--r-- | resources/tools/presentation/input_data_parser.py | 105 |
1 file changed, 27 insertions, 78 deletions
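Before reading the diff below, it may help to see the in-memory layout the retained code builds: each parsed build becomes a pandas Series of Series, filed first under job name, then under build number. A toy sketch under that assumption (the job name, keys, and values here are invented placeholders, not the repository's own data):

```python
import pandas as pd

# Toy stand-in for the parsed content of one Jenkins build; the real
# keys come from the parsed robot output files.
data = {
    "metadata": {"version": "19.08", "generated": "2019-07-31"},
    "suites": {"suite-a": {"doc": "..."}},
    "tests": {"test-1": {"status": "PASS"}},
}

# The same construction the diff keeps: one Series per category,
# indexed by that category's own keys, wrapped in an outer Series.
build_data = pd.Series({
    "metadata": pd.Series(list(data["metadata"].values()),
                          index=list(data["metadata"].keys())),
    "suites": pd.Series(list(data["suites"].values()),
                        index=list(data["suites"].keys())),
    "tests": pd.Series(list(data["tests"].values()),
                       index=list(data["tests"].keys())),
})

# Builds are filed under job and build number, mirroring
# self._input_data[job][str(build_nr)] = build_data in the new code.
input_data = {}
job, build_nr = "csit-trending", 1      # hypothetical job name
if input_data.get(job, None) is None:
    # Modern pandas wants an explicit dtype for an empty Series.
    input_data[job] = pd.Series(dtype=object)
input_data[job][str(build_nr)] = build_data

print(input_data[job]["1"]["metadata"]["version"])   # -> 19.08
```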
```diff
diff --git a/resources/tools/presentation/input_data_parser.py b/resources/tools/presentation/input_data_parser.py
index db83092837..e330b12221 100644
--- a/resources/tools/presentation/input_data_parser.py
+++ b/resources/tools/presentation/input_data_parser.py
@@ -1187,13 +1187,10 @@ class InputData(object):
         return checker.data
 
-    def _download_and_parse_build(self, pid, data_queue, job, build, repeat):
+    def _download_and_parse_build(self, job, build, repeat, pid=10000):
         """Download and parse the input data file.
 
         :param pid: PID of the process executing this method.
-        :param data_queue: Shared memory between processes. Queue which keeps
-            the result data. This data is then read by the main process and used
-            in further processing.
         :param job: Name of the Jenkins job which generated the processed input
             file.
         :param build: Information about the Jenkins build which generated the
@@ -1201,7 +1198,6 @@ class InputData(object):
         :param repeat: Repeat the download specified number of times if not
             successful.
         :type pid: int
-        :type data_queue: multiprocessing.Manager().Queue()
         :type job: str
         :type build: dict
         :type repeat: int
@@ -1278,8 +1274,8 @@ class InputData(object):
                              format(name=full_name)))
             except OSError as err:
                 logs.append(("ERROR",
-                            "Cannot remove the file '{0}': {1}".
-                            format(full_name, repr(err))))
+                             "Cannot remove the file '{0}': {1}".
+                             format(full_name, repr(err))))
 
         logs.append(("INFO", "  Done."))
 
         for level, line in logs:
@@ -1294,13 +1290,7 @@ class InputData(object):
             elif level == "WARNING":
                 logging.warning(line)
 
-        result = {
-            "data": data,
-            "state": state,
-            "job": job,
-            "build": build
-        }
-        data_queue.put(result)
+        return {"data": data, "state": state, "job": job, "build": build}
 
     def download_and_parse_data(self, repeat=1):
         """Download the input data files, parse input data from input files and
@@ -1313,76 +1303,35 @@ class InputData(object):
 
         logging.info("Downloading and parsing input files ...")
 
-        work_queue = multiprocessing.JoinableQueue()
-        manager = multiprocessing.Manager()
-        data_queue = manager.Queue()
-        cpus = multiprocessing.cpu_count()
-
-        logging.info("Nr of CPUs: {0}".format(cpus))
-        os.system("lscpu")
-
-        workers = list()
-        for cpu in range(cpus):
-            worker = Worker(work_queue,
-                            data_queue,
-                            self._download_and_parse_build)
-            worker.daemon = True
-            worker.start()
-            workers.append(worker)
-            os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
-                      format(cpu, worker.pid))
-
         for job, builds in self._cfg.builds.items():
             for build in builds:
-                try:
-                    work_queue.put((job, build, repeat))
-                except (MemoryError, EOFError) as err:
-                    logging.error(repr(err))
-
-        work_queue.join()
+                result = self._download_and_parse_build(job, build, repeat)
 
-        logging.info("Done.")
+                job = result["job"]
+                build_nr = result["build"]["build"]
 
-        logging.info("Collecting data:")
+                logging.info("  {job}-{build}".format(job=job,
+                                                      build=build_nr))
+                if result["data"]:
+                    data = result["data"]
+                    build_data = pd.Series({
+                        "metadata": pd.Series(
+                            data["metadata"].values(),
+                            index=data["metadata"].keys()),
+                        "suites": pd.Series(data["suites"].values(),
+                                            index=data["suites"].keys()),
+                        "tests": pd.Series(data["tests"].values(),
+                                           index=data["tests"].keys())})
 
-        while not data_queue.empty():
-            try:
-                result = data_queue.get()
-
-                job = result["job"]
-                build_nr = result["build"]["build"]
-
-                logging.info("  {job}-{build}".format(job=job,
-                                                      build=build_nr))
-                if result["data"]:
-                    data = result["data"]
-                    build_data = pd.Series({
-                        "metadata": pd.Series(
-                            data["metadata"].values(),
-                            index=data["metadata"].keys()),
-                        "suites": pd.Series(data["suites"].values(),
-                                            index=data["suites"].keys()),
-                        "tests": pd.Series(data["tests"].values(),
-                                           index=data["tests"].keys())})
-
-                    if self._input_data.get(job, None) is None:
-                        self._input_data[job] = pd.Series()
-                    self._input_data[job][str(build_nr)] = build_data
-
-                    self._cfg.set_input_file_name(
-                        job, build_nr, result["build"]["file-name"])
-
-                self._cfg.set_input_state(job, build_nr, result["state"])
-
-            except (MemoryError, EOFError) as err:
-                logging.error(repr(err))
-
-        del data_queue
-
-        # Terminate all workers
-        for worker in workers:
-            worker.terminate()
-            worker.join()
+                    if self._input_data.get(job, None) is None:
+                        self._input_data[job] = pd.Series()
+                    self._input_data[job][str(build_nr)] = build_data
+
+                    self._cfg.set_input_file_name(
+                        job, build_nr, result["build"]["file-name"])
+
+                self._cfg.set_input_state(job, build_nr, result["state"])
 
         logging.info("Done.")
```
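For orientation, this is the shape of the refactor: a pool of worker processes that exchanged work items and results through shared queues is replaced by direct sequential calls whose results are plain return values. A minimal, self-contained sketch of the before/after pattern, with illustrative names (`parse_build`, `run_with_workers`, `run_sequentially`) that are not the repository's own:

```python
import multiprocessing


def parse_build(job, build):
    # Stand-in for InputData._download_and_parse_build(); it only
    # echoes its inputs so the example stays runnable.
    return {"job": job, "build": build, "data": "parsed"}


def worker(work_queue, data_queue):
    # "Before": each worker pulls (job, build) items from a shared work
    # queue and pushes results onto a shared data queue that the main
    # process drains afterwards.
    while True:
        item = work_queue.get()
        if item is None:        # poison pill: no more work
            break
        data_queue.put(parse_build(*item))


def run_with_workers(builds):
    work_queue = multiprocessing.Queue()
    data_queue = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=worker,
                                       args=(work_queue, data_queue))
               for _ in range(multiprocessing.cpu_count())]
    for proc in workers:
        proc.start()
    for item in builds:
        work_queue.put(item)
    for _ in workers:
        work_queue.put(None)    # one poison pill per worker
    # Drain all results (one per submitted item) before joining.
    results = [data_queue.get() for _ in builds]
    for proc in workers:
        proc.join()
    return results


def run_sequentially(builds):
    # "After": the queue machinery disappears; results are consumed in
    # the same loop that produces them, as in the new
    # download_and_parse_data().
    return [parse_build(job, build) for job, build in builds]


if __name__ == "__main__":
    builds = [("csit-trending", {"build": nr}) for nr in (1, 2, 3)]
    assert (sorted(r["build"]["build"] for r in run_with_workers(builds))
            == [r["build"]["build"] for r in run_sequentially(builds)])
```

Besides simplifying control flow, the sequential version drops the guards the queue-based code needed (the `MemoryError`/`EOFError` handlers around queue operations, the `taskset` CPU pinning, and the explicit worker teardown), trading parallel downloads for predictability; the "part 1" in the subject suggests the cleanup continues in follow-up changes.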