author    | pmikus <pmikus@cisco.com>      | 2021-07-08 12:34:33 +0000
committer | Peter Mikus <pmikus@cisco.com> | 2021-08-05 14:45:31 +0000
commit    | 37a55a05cd6ac7ca15bbb99f5ddc9ca61874d5f8 (patch)
tree      | 238f6ba28684a78dd1d0a8724148f4b930872b40 /jjb/scripts
parent    | c841491aa2a606ce68867a6e05f5d9cf1dbb6d9f (diff)
Global: Rework archive artifacts
This patch removes the archive-artifacts-parameter macro
and the ARCHIVE_ARTIFACTS env var from the csit and vpp projects.
All project-specific artifacts to be uploaded with the
log files SHOULD be copied to $WORKSPACE/archives.
The next step once this is merged will be to remove NEXUS
entirely via JCasC.
+ Remove archive-artifacts from all csit/vpp yaml files.
+ Add fdio-infra-ship-backup-logs macro
+ Remove unused jjb/include-raw-deploy-archives.sh
+ CSIT:
- copy job artifacts to $WORKSPACE/archives
+ HC2VPP:
- remove CSIT
+ TLDK:
- remove CSIT
Signed-off-by: pmikus <pmikus@cisco.com>
Signed-off-by: Dave Wallace <dwallacelf@gmail.com>
Change-Id: Iada020cf269714c34f9ce32d764d991827e3b003
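Editor's note: a minimal sketch of the convention this change standardizes. The exact copy commands vary per job; the csit/archives path below comes from this patch, the rest is illustrative.

# Sketch: stage anything worth keeping under $WORKSPACE/archives
# before the post-build publisher (fdio-infra-ship-backup-logs) runs.
mkdir -p "${WORKSPACE}/archives"
# Taken from this patch's CSIT jobs; other jobs copy their own outputs here.
cp -R "${WORKSPACE}/csit/archives" "${WORKSPACE}/archives" || true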
Diffstat (limited to 'jjb/scripts')
-rwxr-xr-x | jjb/scripts/backup_upload_archives.sh     | 189
-rw-r--r-- | jjb/scripts/csit/device-semiweekly.sh     |   1
-rw-r--r-- | jjb/scripts/csit/perf-timed.sh            |   1
-rw-r--r-- | jjb/scripts/csit/tldk-functional-virl.sh  |  41
-rw-r--r-- | jjb/scripts/logs_publish.sh               | 291
-rwxr-xr-x | jjb/scripts/post_build_deploy_archives.sh |  33
-rw-r--r-- | jjb/scripts/vpp/csit-device.sh            |   1
-rw-r--r-- | jjb/scripts/vpp/csit-perf.sh              |   2
8 files changed, 296 insertions, 263 deletions
diff --git a/jjb/scripts/backup_upload_archives.sh b/jjb/scripts/backup_upload_archives.sh
deleted file mode 100755
index 6cedc8025..000000000
--- a/jjb/scripts/backup_upload_archives.sh
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "---> jjb/scripts/backup_upload_archives.sh"
-
-PYTHON_SCRIPT="/w/workspace/test-logs/artifact.py"
-
-# This script uploads the artifacts to a backup upload location
-if [ -f "$PYTHON_SCRIPT" ]; then
-    echo "WARNING: $PYTHON_SCRIPT already exists - assume backup archive upload already done"
-    exit 0
-fi
-
-# the Python code below needs boto3 installed
-python3 -m pip install boto3
-mkdir -p $(dirname "$PYTHON_SCRIPT")
-
-cat >$PYTHON_SCRIPT <<'END_OF_PYTHON_SCRIPT'
-#!/usr/bin/python3
-
-"""Storage utilities library."""
-
-import argparse
-import gzip
-import os
-from mimetypes import MimeTypes
-
-from boto3 import resource
-from botocore.client import Config
-
-ENDPOINT_URL = u"http://storage.service.consul:9000"
-AWS_ACCESS_KEY_ID = u"storage"
-AWS_SECRET_ACCESS_KEY = u"Storage1234"
-REGION_NAME = u"yul1"
-COMPRESS_MIME = (
-    u"text/html",
-    u"text/xml",
-    u"application/octet-stream"
-)
-
-
-def compress(src_fpath):
-    """Compress a single file.
-
-    :param src_fpath: Input file path.
-    :type src_fpath: str
-    """
-    with open(src_fpath, u"rb") as orig_file:
-        with gzip.open(src_fpath + ".gz", u"wb") as zipped_file:
-            zipped_file.writelines(orig_file)
-
-
-def upload(storage, bucket, src_fpath, dst_fpath):
-    """Upload single file to destination bucket.
-
-    :param storage: S3 storage resource.
-    :param bucket: S3 bucket name.
-    :param src_fpath: Input file path.
-    :param dst_fpath: Destination file path on remote storage.
-    :type storage: Object
-    :type bucket: str
-    :type src_fpath: str
-    :type dst_fpath: str
-    """
-    mime_guess = MimeTypes().guess_type(src_fpath)
-    mime = mime_guess[0]
-    encoding = mime_guess[1]
-    if not mime:
-        mime = "application/octet-stream"
-
-    if mime in COMPRESS_MIME and bucket in "logs" and encoding != "gzip":
-        compress(src_fpath)
-        src_fpath = src_fpath + ".gz"
-        dst_fpath = dst_fpath + ".gz"
-
-    extra_args = dict()
-    extra_args['ContentType'] = mime
-
-    storage.Bucket(bucket + ".fd.io").upload_file(
-        src_fpath,
-        dst_fpath,
-        ExtraArgs=extra_args
-    )
-    print("https://" + bucket + ".nginx.service.consul/" + dst_fpath)
-
-
-def upload_recursive(storage, bucket, src_fpath):
-    """Recursively uploads input folder to destination.
-
-    Example:
-    - bucket: logs
-    - src_fpath: /home/user
-    - dst_fpath: logs.fd.io/home/user
-
-    :param storage: S3 storage resource.
-    :param bucket: S3 bucket name.
-    :param src_fpath: Input folder path.
-    :type storage: Object
-    :type bucket: str
-    :type src_fpath: str
-    """
-    for path, _, files in os.walk(src_fpath):
-        for file in files:
-            _path = path.replace(src_fpath, u"")
-            _dir = src_fpath[1:] if src_fpath[0] == "/" else src_fpath
-            _dst_fpath = os.path.normpath(_dir + "/" + _path + "/" + file)
-            _src_fpath = os.path.join(path, file)
-            upload(storage, bucket, _src_fpath, _dst_fpath)
-
-
-def main():
-    """Main function for storage manipulation."""
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        u"-d", u"--dir", required=True, type=str,
-        help=u"Directory to upload to storage."
-    )
-    parser.add_argument(
-        u"-b", u"--bucket", required=True, type=str,
-        help=u"Target bucket on storage."
-    )
-    args = parser.parse_args()
-
-    # Create main storage resource.
-    storage = resource(
-        u"s3",
-        endpoint_url=ENDPOINT_URL,
-        aws_access_key_id=AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
-        config=Config(
-            signature_version=u"s3v4"
-        ),
-        region_name=REGION_NAME
-    )
-
-    upload_recursive(
-        storage=storage,
-        bucket=args.bucket,
-        src_fpath=args.dir
-    )
-
-
-if __name__ == u"__main__":
-    main()
-
-END_OF_PYTHON_SCRIPT
-
-WS_ARCHIVES_DIR="$WORKSPACE/archives"
-TMP_ARCHIVES_DIR="/tmp/archives"
-JENKINS_BUILD_ARCHIVE_DIR="$TMP_ARCHIVES_DIR/$JENKINS_HOSTNAME/$JOB_NAME/$BUILD_NUMBER"
-
-mkdir -p $JENKINS_BUILD_ARCHIVE_DIR
-
-if [ -e "$WS_ARCHIVES_DIR" ]; then
-    echo "Found $WS_ARCHIVES_DIR, uploading its contents"
-    cp -r $WS_ARCHIVES_DIR/* $JENKINS_BUILD_ARCHIVE_DIR
-else
-    echo "No $WS_ARCHIVES_DIR found. Creating a dummy file."
-    echo "No archives found while doing backup upload" > "$JENKINS_BUILD_ARCHIVE_DIR/no-archives-found.txt"
-fi
-
-console_log="$JENKINS_BUILD_ARCHIVE_DIR/console.log"
-echo "Retrieving Jenkins console log to '$console_log'"
-wget -qO "$console_log" "$BUILD_URL/consoleText"
-
-console_log="$JENKINS_BUILD_ARCHIVE_DIR/console-timestamp.log"
-echo "Retrieving Jenkins console timestamp log to '$console_log'"
-wget -qO "$console_log" "$BUILD_URL/timestamps?time=HH:mm:ss&appendLog"
-
-pushd $TMP_ARCHIVES_DIR
-echo "Contents of the archives dir '$TMP_ARCHIVES_DIR':"
-ls -alR $TMP_ARCHIVES_DIR
-archive_cmd="python3 $PYTHON_SCRIPT -d . -b logs"
-echo -e "\nRunning uploader script '$archive_cmd':\n"
-$archive_cmd || echo "Failed to upload logs"
-popd
diff --git a/jjb/scripts/csit/device-semiweekly.sh b/jjb/scripts/csit/device-semiweekly.sh
index ff82cb5c8..ea405b2b5 100644
--- a/jjb/scripts/csit/device-semiweekly.sh
+++ b/jjb/scripts/csit/device-semiweekly.sh
@@ -47,3 +47,4 @@ git checkout "${BRANCH_NAME}"
 popd
 csit_entry_dir="${WORKSPACE}/csit/resources/libraries/bash/entry"
 source "${csit_entry_dir}/bootstrap_vpp_device.sh"
+cp -R "${WORKSPACE}/csit/archives" "${WORKSPACE}/archives" || true
diff --git a/jjb/scripts/csit/perf-timed.sh b/jjb/scripts/csit/perf-timed.sh
index 93566550a..6d31f9ef2 100644
--- a/jjb/scripts/csit/perf-timed.sh
+++ b/jjb/scripts/csit/perf-timed.sh
@@ -40,3 +40,4 @@ fi
 popd
 csit_entry_dir="${WORKSPACE}/csit/resources/libraries/bash/entry"
 source "${csit_entry_dir}/with_oper_for_vpp.sh" "bootstrap_verify_perf.sh"
+cp -R "${WORKSPACE}/csit/archives" "${WORKSPACE}/archives" || true
diff --git a/jjb/scripts/csit/tldk-functional-virl.sh b/jjb/scripts/csit/tldk-functional-virl.sh
deleted file mode 100644
index 5f3090139..000000000
--- a/jjb/scripts/csit/tldk-functional-virl.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2020 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "---> jjb/scripts/csit/tldk-functional-virl.sh"
-
-set -xeu -o pipefail
-
-# Clone tldk and start tests
-git clone https://gerrit.fd.io/r/tldk
-
-# If the git clone fails, complain clearly and exit
-if [ $? != 0 ]; then
-    echo "Failed to run: git clone https://gerrit.fd.io/r/tldk"
-    exit 1
-fi
-
-# execute tldk bootstrap script if it exists
-if [ -e bootstrap-TLDK.sh ]
-then
-    # make sure that bootstrap-TLDK.sh is executable
-    chmod +x bootstrap-TLDK.sh
-    # run the script
-    ./bootstrap-TLDK.sh
-else
-    echo 'ERROR: No bootstrap-TLDK.sh found'
-    exit 1
-fi
-
-# vim: ts=4 ts=4 sts=4 et :
diff --git a/jjb/scripts/logs_publish.sh b/jjb/scripts/logs_publish.sh
new file mode 100644
index 000000000..da3593c66
--- /dev/null
+++ b/jjb/scripts/logs_publish.sh
@@ -0,0 +1,291 @@
+#!/bin/bash
+
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "---> logs_publish.sh"
+
+CDN_URL="logs.nginx.service.consul"
+export AWS_ENDPOINT_URL="http://storage.service.consul:9000"
+
+# FIXME: s3 config (until migrated to config provider, then pwd will be reset)
+mkdir -p ${HOME}/.aws
+echo "[default]
+aws_access_key_id = storage
+aws_secret_access_key = Storage1234" >> "$HOME/.aws/credentials"
+
+PYTHON_SCRIPT="/w/workspace/test-logs/logs_publish.py"
+
+# This script uploads the artifacts to a backup upload location
+if [ -f "$PYTHON_SCRIPT" ]; then
+    echo "WARNING: $PYTHON_SCRIPT already exists - assume backup archive upload already done"
+    exit 0
+fi
+
+pip3 install boto3
+mkdir -p $(dirname "$PYTHON_SCRIPT")
+
+cat >$PYTHON_SCRIPT <<'END_OF_PYTHON_SCRIPT'
+#!/usr/bin/python3
+
+"""Storage utilities library."""
+
+import gzip
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+from mimetypes import MimeTypes
+
+import boto3
+from botocore.exceptions import ClientError
+import requests
+import six
+
+
+logging.basicConfig(
+    format=u"%(levelname)s: %(message)s",
+    stream=sys.stdout,
+    level=logging.INFO
+)
+logging.getLogger(u"botocore").setLevel(logging.INFO)
+
+COMPRESS_MIME = (
+    u"text/html",
+    u"text/xml",
+    u"text/plain",
+    u"application/octet-stream"
+)
+
+
+def compress(src_fpath):
+    """Compress a single file.
+
+    :param src_fpath: Input file path.
+    :type src_fpath: str
+    """
+    with open(src_fpath, u"rb") as orig_file:
+        with gzip.open(src_fpath + ".gz", u"wb") as zipped_file:
+            zipped_file.writelines(orig_file)
+
+
+def copy_archives(workspace):
+    """Copy files or directories in a $WORKSPACE/archives to the current
+    directory.
+
+    :params workspace: Workspace directery with archives directory.
+    :type workspace: str
+    """
+    archives_dir = os.path.join(workspace, u"archives")
+    dest_dir = os.getcwd()
+
+    logging.debug(u"Copying files from " + archives_dir + u" to " + dest_dir)
+
+    if os.path.exists(archives_dir):
+        if os.path.isfile(archives_dir):
+            logging.error(u"Target is a file, not a directory.")
+            raise RuntimeError(u"Not a directory.")
+        else:
+            logging.debug("Archives dir {} does exist.".format(archives_dir))
+            for file_or_dir in os.listdir(archives_dir):
+                f = os.path.join(archives_dir, file_or_dir)
+                try:
+                    logging.debug(u"Copying " + f)
+                    shutil.copy(f, dest_dir)
+                except shutil.Error as e:
+                    logging.error(e)
+                    raise RuntimeError(u"Could not copy " + f)
+    else:
+        logging.error(u"Archives dir does not exist.")
+        raise RuntimeError(u"Missing directory " + archives_dir)
+
+
+def upload(s3_resource, s3_bucket, src_fpath, s3_path):
+    """Upload single file to destination bucket.
+
+    :param s3_resource: S3 storage resource.
+    :param s3_bucket: S3 bucket name.
+    :param src_fpath: Input file path.
+    :param s3_path: Destination file path on remote storage.
+    :type s3_resource: Object
+    :type s3_bucket: str
+    :type src_fpath: str
+    :type s3_path: str
+    """
+    mime_guess = MimeTypes().guess_type(src_fpath)
+    mime = mime_guess[0]
+    encoding = mime_guess[1]
+    if not mime:
+        mime = u"application/octet-stream"
+
+    if s3_bucket not in u"docs.fd.io":
+        if mime in COMPRESS_MIME and encoding != u"gzip":
+            compress(src_fpath)
+            src_fpath = src_fpath + u".gz"
+            s3_path = s3_path + u".gz"
+
+    extra_args = {u"ContentType": mime}
+
+    try:
+        logging.info(u"Attempting to upload file " + src_fpath)
+        s3_resource.Bucket(s3_bucket).upload_file(
+            src_fpath, s3_path, ExtraArgs=extra_args
+        )
+        logging.info(u"Successfully uploaded to " + s3_path)
+    except ClientError as e:
+        logging.error(e)
+
+
+def upload_recursive(s3_resource, s3_bucket, src_fpath, s3_path):
+    """Recursively uploads input folder to destination.
+
+    Example:
+    - s3_bucket: logs.fd.io
+    - src_fpath: /workspace/archives.
+    - s3_path: /hostname/job/id/
+
+    :param s3_resource: S3 storage resource.
+    :param s3_bucket: S3 bucket name.
+    :param src_fpath: Input folder path.
+    :param s3_path: S3 destination path.
+    :type s3_resource: Object
+    :type s3_bucket: str
+    :type src_fpath: str
+    :type s3_path: str
+    """
+    for path, _, files in os.walk(src_fpath):
+        for file in files:
+            _path = path.replace(src_fpath, u"")
+            _src_fpath = path + u"/" + file
+            _s3_path = os.path.normpath(s3_path + u"/" + _path + u"/" + file)
+            upload(
+                s3_resource=s3_resource,
+                s3_bucket=s3_bucket,
+                src_fpath=_src_fpath,
+                s3_path=_s3_path
+            )
+
+
+def deploy_s3(s3_bucket, s3_path, build_url, workspace):
+    """Add logs and archives to temp directory to be shipped to S3 bucket.
+    Fetches logs and system information and pushes them and archives to S3
+    for log archiving.
+    Requires the s3 bucket to exist.
+
+    :param s3_bucket: Name of S3 bucket. Eg: lf-project-date
+    :param s3_path: Path on S3 bucket place the logs and archives. Eg:
+        $JENKINS_HOSTNAME/$JOB_NAME/$BUILD_NUMBER
+    :param build_url: URL of the Jenkins build. Jenkins typically provides this
+        via the $BUILD_URL environment variable.
+    :param workspace: Directory in which to search, typically in Jenkins
+        this is $WORKSPACE
+    :type s3_bucket: Object
+    :type s3_path: str
+    :type build_url: str
+    :type workspace: str
+    """
+    s3_resource = boto3.resource(
+        u"s3",
+        endpoint_url=os.environ[u"AWS_ENDPOINT_URL"]
+    )
+
+    previous_dir = os.getcwd()
+    work_dir = tempfile.mkdtemp(prefix="backup-s3.")
+    os.chdir(work_dir)
+
+    # Copy archive files to tmp dir.
+    copy_archives(workspace)
+
+    # Create additional build logs.
+    with open(u"_build-details.log", u"w+") as f:
+        f.write(u"build-url: " + build_url)
+
+    with open(u"_sys-info.log", u"w+") as f:
+        sys_cmds = []
+
+        logging.debug(u"Platform: " + sys.platform)
+        if sys.platform == u"linux" or sys.platform == u"linux2":
+            sys_cmds = [
+                [u"uname", u"-a"],
+                [u"lscpu"],
+                [u"nproc"],
+                [u"df", u"-h"],
+                [u"free", u"-m"],
+                [u"ip", u"addr"],
+                [u"sar", u"-b", u"-r", u"-n", u"DEV"],
+                [u"sar", u"-P", u"ALL"],
+            ]
+
+        for c in sys_cmds:
+            try:
+                output = subprocess.check_output(c).decode(u"utf-8")
+            except FileNotFoundError:
+                logging.debug(u"Command not found: " + c)
+                continue
+
+            cmd = u" ".join(c)
+            output = u"---> " + cmd + "\n" + output + "\n"
+            f.write(output)
+            logging.info(output)
+
+    # Magic string used to trim console logs at the appropriate level during
+    # wget.
+    MAGIC_STRING = u"-----END_OF_BUILD-----"
+    logging.info(MAGIC_STRING)
+
+    resp = requests.get(build_url + u"/consoleText")
+    with open(u"console.log", u"w+", encoding=u"utf-8") as f:
+        f.write(
+            six.text_type(resp.content.decode(u"utf-8").split(MAGIC_STRING)[0])
+        )
+
+    query = u"time=HH:mm:ss&appendLog"
+    resp = requests.get(build_url + u"/timestamps?" + query)
+    with open(u"console-timestamp.log", u"w+", encoding=u"utf-8") as f:
+        f.write(
+            six.text_type(resp.content.decode(u"utf-8").split(MAGIC_STRING)[0])
+        )
+
+    upload_recursive(
+        s3_resource=s3_resource,
+        s3_bucket=s3_bucket,
+        src_fpath=work_dir,
+        s3_path=s3_path
+    )
+
+    os.chdir(previous_dir)
+    shutil.rmtree(work_dir)
+
+
+if __name__ == u"__main__":
+    globals()[sys.argv[1]](*sys.argv[2:])
+
+END_OF_PYTHON_SCRIPT
+
+# The 'deploy_s3' command below expects the archives
+# directory to exist. Normally lf-infra-sysstat or similar would
+# create it and add content, but to make sure this script is
+# self-contained, we ensure it exists here.
+mkdir -p "$WORKSPACE/archives"
+
+s3_path="$JENKINS_HOSTNAME/$JOB_NAME/$BUILD_NUMBER/"
+echo "INFO: S3 path $s3_path"
+
+echo "INFO: archiving backup logs to S3"
+# shellcheck disable=SC2086
+python3 $PYTHON_SCRIPT deploy_s3 "logs.fd.io" "$s3_path" \
+    "$BUILD_URL" "$WORKSPACE"
+
+echo "S3 build backup logs: <a href=\"https://$CDN_URL/$s3_path\">https://$CDN_URL/$s3_path</a>"
diff --git a/jjb/scripts/post_build_deploy_archives.sh b/jjb/scripts/post_build_deploy_archives.sh
index 355d6fd48..02a56f130 100755
--- a/jjb/scripts/post_build_deploy_archives.sh
+++ b/jjb/scripts/post_build_deploy_archives.sh
@@ -75,39 +75,6 @@ generate_vpp_stacktrace_and_delete_core() {
 
 mkdir -p "$WS_ARCHIVES_DIR"
 
-# Log the build environment variables
-echo "Logging build environment variables in '$BUILD_ENV_LOG'..."
-env > $BUILD_ENV_LOG
-
-echo "ARCHIVE_ARTIFACTS = '$ARCHIVE_ARTIFACTS'"
-if [ -n "${ARCHIVE_ARTIFACTS:-}" ] ; then
-    pushd "$WORKSPACE"
-    shopt -s globstar  # Enable globstar to copy archives
-    for file in $ARCHIVE_ARTIFACTS ; do
-        if [ -f "$file" ] ; then
-            echo "Archiving '$file' to '$destfile'"
-            destfile="$WS_ARCHIVE_DIR$file"
-            destdir="$(dirname $destfile)"
-            mkdir -p $destdir
-            mv -f $file $destfile
-        else
-            echo "Not archiving '$file'"
-            if ! grep -qe '*' <<<"$file" ; then
-                echo "WARNING: No artifacts detected in ARCHIVE_ARTIFACTS '$ARCHIVE_ARTIFACTS'!"
-            fi
-        fi
-    done
-    shopt -u globstar  # Disable globstar
-    popd
-fi
-
-# find and gzip any 'text' files
-find $WS_ARCHIVES_DIR -type f -print0 \
-    | xargs -0r file \
-    | egrep -e ':.*text.*' \
-    | cut -d: -f1 \
-    | xargs -d'\n' -r gzip
-
 # generate stack trace for VPP core files for upload instead of core file.
 if [ -d "$WORKSPACE/build-root" ] ; then
     for file in $(find $WS_ARCHIVES_DIR -type f -name 'core*.gz') ; do
diff --git a/jjb/scripts/vpp/csit-device.sh b/jjb/scripts/vpp/csit-device.sh
index 6d4beb80f..8c76d717f 100644
--- a/jjb/scripts/vpp/csit-device.sh
+++ b/jjb/scripts/vpp/csit-device.sh
@@ -38,3 +38,4 @@ fi
 popd
 csit_entry_dir="${WORKSPACE}/csit/resources/libraries/bash/entry"
 source "${csit_entry_dir}/with_oper_for_vpp.sh" "per_patch_device.sh"
+cp -R "${WORKSPACE}/csit_current/archives/"* "${WORKSPACE}/archives/" || true
diff --git a/jjb/scripts/vpp/csit-perf.sh b/jjb/scripts/vpp/csit-perf.sh
index e57306651..cb13557ac 100644
--- a/jjb/scripts/vpp/csit-perf.sh
+++ b/jjb/scripts/vpp/csit-perf.sh
@@ -38,3 +38,5 @@ fi
 popd
 csit_entry_dir="${WORKSPACE}/csit/resources/libraries/bash/entry"
 source "${csit_entry_dir}/with_oper_for_vpp.sh" "per_patch_perf.sh"
+cp -R "${WORKSPACE}/csit_current/"* "${WORKSPACE}/archives/" || true
+cp -R "${WORKSPACE}/csit_parent/"* "${WORKSPACE}/archives/" || true