author     Peter Mikus <peter.mikus@icloud.com>  2025-01-23 13:26:27 +0100
committer  Peter Mikus <peter.mikus@icloud.com>  2025-01-23 12:31:51 +0000
commit     d5dc64e4e09b70dd8647ee186b48231e72e9897c (patch)
tree       25faf2e4a41168266d62ae1dfc385a387b1b44c8
parent     64f263770e6ddd9f05aa5444a6619f0ba88f70db (diff)
fix(ansible): Add docker_sut awareness
Signed-off-by: Peter Mikus <peter.mikus@icloud.com>
Change-Id: I3dc3188cab76231a430abc23c409471bcc330409
-rw-r--r--  fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh | 113
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml | 19
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml | 14
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml | 8
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/main.yaml | 16
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml | 6
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml | 4
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/sut.yaml | 38
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/tg.yaml | 6
-rw-r--r--  fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml | 32
10 files changed, 66 insertions(+), 190 deletions(-)
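The change keys the new SUT cleanup behaviour on a single inventory variable: when docker_sut is defined for a host, the role retargets ansible_port at the containerized SUTs (ports 6001 and 6002) before removing VPP, then restores port 22. A minimal sketch of how such a flag might be supplied, assuming a group_vars file (the variable name comes from the diff below; the file path and value are assumptions):

    # group_vars/sut.yaml (hypothetical path)
    # Mark every host in the sut group as a containerized SUT:
    docker_sut: true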
diff --git a/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh b/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh
deleted file mode 100644
index ede2db1273..0000000000
--- a/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-function die () {
- # Print the message to standard error and exit with the error code
- # specified by the second argument.
- #
- # Hardcoded values:
- # - The default error message.
- # Arguments:
- # - ${1} - The whole error message, be sure to quote. Optional.
- # - ${2} - The code to exit with, default: 1.
-
- set +eu
- warn "${1:-Unspecified run-time error occurred!}"
- exit "${2:-1}"
-}
-
-
-function set_eligibility_off {
- # Mark the Nomad node ineligible for scheduling; die on failure.
-
- set -euo pipefail
-
- node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die
- node_status="$(nomad node status | grep $(hostname))" || die
-
- if [[ "${node_status}" != *"ineligible"* ]]; then
- nomad node eligibility -disable "${node_id}" || die
- node_status="$(nomad node status | grep $(hostname))" || die
- if [[ "${node_status}" != *"ineligible"* ]]; then
- die "Set eligibility off failed!"
- fi
- fi
-}
-
-
-function set_eligibility_on {
- # Mark the Nomad node eligible for scheduling; die on failure.
-
- set -euo pipefail
-
- node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die
- node_status="$(nomad node status | grep $(hostname))" || die
-
- if [[ "${node_status}" == *"ineligible"* ]]; then
- nomad node eligibility -enable "${node_id}" || die
- node_status="$(nomad node status | grep $(hostname))" || die
- if [[ "${node_status}" == *"ineligible"* ]]; then
- die "Set eligibility on failed!"
- fi
- fi
-}
-
-
-function restart_vfs_service {
- # Stop and start the VF service. This will reinitialize VFs and driver mappings.
-
- set -euo pipefail
-
- warn "Restarting VFs service (this may take few minutes)..."
- sudo service csit-initialize-vfs stop || die "Failed to stop VFs service!"
- sudo service csit-initialize-vfs start || die "Failed to start VFs service!"
-}
-
-
-function wait_for_pending_containers {
- # Wait in a loop, for a defined amount of time, for pending containers
- # to quit gracefully. If the force parameter is specified, force-kill them.
-
- # Arguments:
- # - ${@} - Script parameters.
-
- set -euo pipefail
-
- retries=60
- wait_time=60
- containers=(docker ps --quiet --filter "name=csit*")
-
- for i in $(seq 1 ${retries}); do
- mapfile -t pending_containers < <( ${containers[@]} ) || die
- warn "Waiting for pending containers [${pending_containers[@]}] ..."
- if [ ${#pending_containers[@]} -eq 0 ]; then
- break
- fi
- sleep "${wait_time}" || die
- done
- if [ ${#pending_containers[@]} -ne 0 ]; then
- if [[ "${1-}" == "force" ]]; then
- warn "Force killing [${pending_containers[@]}] ..."
- docker rm --force ${pending_containers[@]} || die
- else
- die "Still few containers running!"
- fi
- fi
-}
-
-
-function warn () {
- # Print the message to standard error.
- #
- # Arguments:
- # - ${@} - The text of the message.
-
- echo "$@" >&2
-}
-
-
-set_eligibility_off || die
-wait_for_pending_containers "${@}" || die
-restart_vfs_service || die
-set_eligibility_on || die
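For reference, the deleted helper was installed to /usr/local/bin with mode 744 by the vpp_device.yaml task that is also removed below, and accepted an optional force argument. An invocation sketch, inferred from the script's own argument handling:

    # Drain Nomad scheduling, wait for pending csit* containers, force-remove
    # any stragglers, restart the VF service, then re-enable scheduling:
    sudo /usr/local/bin/reset_vppdevice.sh force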
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml b/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml
index 76704ab50d..269b0e523d 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml
@@ -1,9 +1,9 @@
---
# file: tasks/clean_images.yaml
-- name: Clean Docker Images
+- name: "Clean Docker Images"
block:
- - name: Clean Images - Prefetch Docker Images
+ - name: "Clean Images - Prefetch Docker Images"
ansible.builtin.cron:
name: "Prefetch docker image {{ item }}"
minute: "10"
@@ -14,7 +14,7 @@
tags:
- prefetch-docker-images
- - name: Clean Images - Remove Dangling Docker Images
+ - name: "Clean Images - Remove Dangling Docker Images"
ansible.builtin.cron:
name: "Remove dangling docker images"
minute: "10"
@@ -22,15 +22,4 @@
weekday: "7"
job: "/usr/bin/docker rmi $(/usr/bin/docker images --filter 'dangling=true' -q)"
tags:
- - remove-docker-images-dangling
-
-# TODO: Disabled until all images are in the registry
-# - name: Clean Images - Prune Docker Images
-# cron:
-# name: "Prune docker images"
-# minute: "10"
-# hour: "6"
-# weekday: 7
-# job: "/usr/bin/docker image prune --all --force"
-# tags:
-# - prune-docker-images
+ - remove-docker-images-dangling
\ No newline at end of file
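The surviving cron job removes dangling images by feeding a filtered image list to docker rmi; the commented-out TODO block it deletes would have used docker image prune instead. A one-line equivalent of the remaining job, restricted to dangling images (a sketch, not part of the role):

    # Same effect as the rmi-with-filter job line, without the subshell:
    /usr/bin/docker image prune --force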
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml b/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml
index dc739eb954..9c3dc173bc 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml
@@ -1,37 +1,37 @@
---
# file: tasks/kill_containers.yaml
-- name: Kill Docker Containers
+- name: "Kill Docker Containers"
block:
- - name: Get Running Docker Containers
+ - name: "Get Running Docker Containers"
ansible.builtin.shell: "docker ps -a --filter name=DUT -q"
register: running_containers
changed_when: false
tags:
- kill-containers
- - name: Remove All Docker Containers
+ - name: "Remove All Docker Containers"
ansible.builtin.shell: "docker rm --force {{ item }}"
with_items: "{{ running_containers.stdout_lines }}"
tags:
- kill-containers
rescue:
- - name: Restart Docker Daemon
+ - name: "Restart Docker Daemon"
ansible.builtin.systemd:
name: "docker"
state: "restarted"
-- name: Kill LXC Containers
+- name: "Kill LXC Containers"
block:
- - name: Get Running LXC Containers
+ - name: "Get Running LXC Containers"
ansible.builtin.shell: "lxc-ls"
register: running_containers
changed_when: false
tags:
- kill-containers
- - name: Remove All LXC Containers
+ - name: "Remove All LXC Containers"
ansible.builtin.shell: "lxc-destroy --force -n {{ item }}"
with_items: "{{ running_containers.stdout_lines }}"
tags:
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml b/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml
index 9ab98a8e57..94f6d0439e 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml
@@ -1,9 +1,9 @@
---
# file: tasks/kill_process.yaml
-- name: Kill Process - {{ process }}
+- name: "Kill Process - {{ process }}"
block:
- - name: Get PID Of {{ process }}
+ - name: "Get PID Of {{ process }}"
ansible.builtin.shell: "ps -ef | grep -v grep | grep -w {{ process }} | awk '{print $2}'"
when:
- process is defined and process != ""
@@ -11,7 +11,7 @@
tags:
- kill-process
- - name: Safe Kill {{ process }}
+ - name: "Safe Kill {{ process }}"
ansible.builtin.shell: "kill {{ item }}"
with_items: "{{ running_processes.stdout_lines }}"
ignore_errors: true
@@ -27,7 +27,7 @@
tags:
- kill-process
- - name: Kill Process - Force Kill {{ process }}
+ - name: "Kill Process - Force Kill {{ process }}"
ansible.builtin.shell: "kill -9 {{ item }}"
with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}"
tags:
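The task chain above implements a TERM-then-KILL escalation: collect PIDs with ps/grep, send a plain kill, then kill -9 only the PIDs whose soft kill failed. A condensed shell sketch of the same idea (pkill/pgrep are assumptions, not used by the role):

    # Ask processes matching ${process} to exit, then force-kill survivors
    # after a short grace period:
    pkill -TERM -f "${process}" || true
    sleep 5
    pgrep -f "${process}" > /dev/null && pkill -KILL -f "${process}"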
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/main.yaml b/fdio.infra.ansible/roles/cleanup/tasks/main.yaml
index c97b9c5d7e..82e5a372bb 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/main.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/main.yaml
@@ -1,26 +1,20 @@
---
# file: tasks/main.yaml
-- name: tg specific
- include_tasks: tg.yaml
+- name: "tg specific"
+ include_tasks: "tg.yaml"
when: "'tg' in group_names"
tags:
- cleanup
-- name: sut specific
+- name: "sut specific"
include_tasks: sut.yaml
when: "'sut' in group_names"
tags:
- cleanup
-- name: vpp_device specific
- include_tasks: vpp_device.yaml
- when: "'vpp_device' in group_names"
- tags:
- - cleanup
-
-- name: nomad specific
- include_tasks: nomad.yaml
+- name: "nomad specific"
+ include_tasks: "nomad.yaml"
when: "'nomad' in group_names"
tags:
- cleanup
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml b/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml
index 086a4eff7d..babf6be34a 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml
@@ -1,10 +1,10 @@
---
# file: tasks/nomad.yaml
-- name: Host Cleanup
+- name: "Host Cleanup"
block:
- - name: Clean Images
- import_tasks: clean_images.yaml
+ - name: "Clean Images"
+ import_tasks: "clean_images.yaml"
vars:
images_to_prefetch_by_arch:
aarch64:
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml b/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml
index 652729bc30..a40b1d36c8 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml
@@ -1,14 +1,14 @@
---
# file: tasks/remove_package.yaml
-- name: Fix Corrupted APT
+- name: "Fix Corrupted APT"
ansible.builtin.shell: "dpkg --configure -a"
when:
- ansible_distribution == 'Ubuntu'
tags:
- remove-package
-- name: Remove Package - {{ package }}
+- name: "Remove Package - {{ package }}"
ansible.builtin.apt:
name: "{{ package }}"
force: true
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml b/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml
index 22bf596369..fa0d89816f 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml
@@ -89,9 +89,47 @@
tags:
- kill-containers
+ - name: "Set SSH port to 6001 if docker_sut"
+ set_fact:
+ ansible_port: 6001
+ when: docker_sut is defined
+ tags:
+ - remove-package
+
+ - name: Remove Packages - Remove VPP
+ import_tasks: remove_package.yaml
+ when: docker_sut is defined
+ vars:
+ package: "*vpp*"
+ tags:
+ - remove-package
+
+ - name: "Set SSH port to 6002 if docker_sut"
+ set_fact:
+ ansible_port: 6002
+ when: docker_sut is defined
+ tags:
+ - remove-package
+
- name: Remove Packages - Remove VPP
import_tasks: remove_package.yaml
+ when: docker_sut is defined
vars:
package: "*vpp*"
tags:
- remove-package
+
+ - name: "Set SSH port to 22 if docker_sut"
+ set_fact:
+ ansible_port: 22
+ when: docker_sut is defined
+ tags:
+ - remove-package
+
+ - name: Remove Packages - Remove VPP
+ import_tasks: remove_package.yaml
+ when: docker_sut is undefined
+ vars:
+ package: "*vpp*"
+ tags:
+ - remove-package \ No newline at end of file
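The three added Remove VPP passes differ only in the target port and the docker_sut condition. For the containerized case, a loop-based sketch that avoids the set_fact port juggling (include_tasks rather than import_tasks, since import_tasks cannot loop; the port values come from the diff, the rest is an assumption):

    - name: "Remove Packages - Remove VPP From Containerized SUTs"
      include_tasks: "remove_package.yaml"
      vars:
        package: "*vpp*"
        # Connection variable set per loop iteration; reverts after the task.
        ansible_port: "{{ item }}"
      loop: [6001, 6002]
      when: docker_sut is defined
      tags:
        - remove-package

Because ansible_port is scoped to the include here, it reverts automatically afterwards, so no explicit reset to port 22 is needed.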
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml b/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml
index 8c0162df2c..423fd43b1d 100644
--- a/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml
+++ b/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml
@@ -1,10 +1,10 @@
---
# file: tasks/tg.yaml
-- name: Host Cleanup
+- name: "Host Cleanup"
block:
- - name: Kill Processes - TRex
- import_tasks: kill_process.yaml
+ - name: "Kill Processes - TRex"
+ import_tasks: "kill_process.yaml"
vars:
process: "_t-rex"
when:
diff --git a/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml b/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml
deleted file mode 100644
index c97fa0cde5..0000000000
--- a/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# file: tasks/vpp_device.yaml
-
-- name: Host Cleanup
- block:
- - name: Reset vpp_device Binary
- ansible.builtin.copy:
- src: "files/reset_vppdevice.sh"
- dest: "/usr/local/bin"
- owner: "root"
- group: "root"
- mode: "744"
- tags:
- - reset-sriov
-
- - name: Clean Images
- import_tasks: clean_images.yaml
- vars:
- images_to_prefetch_by_arch:
- aarch64:
- - "fdiotools/builder-ubuntu2004:prod-aarch64"
- - "fdiotools/builder-ubuntu1804:prod-aarch64"
- - "fdiotools/builder-centos8:prod-aarch64"
- x86_64:
- - "fdiotools/builder-ubuntu2004:prod-x86_64"
- - "fdiotools/builder-ubuntu1804:prod-x86_64"
- - "fdiotools/builder-debian10:prod-x86_64"
- - "fdiotools/builder-debian9:prod-x86_64"
- - "fdiotools/builder-centos8:prod-x86_64"
- - "fdiotools/builder-centos7:prod-x86_64"
- tags:
- - clean-images