From df5672b3d9c29b51397f4770eb992c9f3f3955ce Mon Sep 17 00:00:00 2001 From: pmikus Date: Thu, 8 Apr 2021 10:44:18 +0000 Subject: Ansible git move + Better accessibility + Compliant with fdio.infra._function_ - function [pxe|terraform|ansible|vagrant] + dill==0.3.3 also applied on TBs - ci-man to follow today - Docs to be updated in separate patch Signed-off-by: pmikus Change-Id: Iff9eaa29d63044188cc8160db2d9b44b7635782a --- fdio.infra.ansible/.gitignore | 1 + fdio.infra.ansible/cloud_topology.yaml | 9 + fdio.infra.ansible/dev.yaml | 18 + .../inventories/lf_inventory/group_vars/all.yaml | 5 + .../lf_inventory/host_vars/10.30.51.16.yaml | 29 + .../lf_inventory/host_vars/10.30.51.17.yaml | 29 + .../lf_inventory/host_vars/10.30.51.18.yaml | 29 + .../lf_inventory/host_vars/10.30.51.20.yaml | 29 + .../lf_inventory/host_vars/10.30.51.21.yaml | 29 + .../lf_inventory/host_vars/10.30.51.22.yaml | 29 + .../lf_inventory/host_vars/10.30.51.24.yaml | 29 + .../lf_inventory/host_vars/10.30.51.25.yaml | 29 + .../lf_inventory/host_vars/10.30.51.26.yaml | 29 + .../lf_inventory/host_vars/10.30.51.28.yaml | 64 ++ .../lf_inventory/host_vars/10.30.51.29.yaml | 63 ++ .../lf_inventory/host_vars/10.30.51.30.yaml | 65 ++ .../lf_inventory/host_vars/10.30.51.32.yaml | 64 ++ .../lf_inventory/host_vars/10.30.51.33.yaml | 64 ++ .../lf_inventory/host_vars/10.30.51.34.yaml | 65 ++ .../lf_inventory/host_vars/10.30.51.35.yaml | 65 ++ .../lf_inventory/host_vars/10.30.51.36.yaml | 21 + .../lf_inventory/host_vars/10.30.51.37.yaml | 21 + .../lf_inventory/host_vars/10.30.51.38.yaml | 24 + .../lf_inventory/host_vars/10.30.51.39.yaml | 70 ++ .../lf_inventory/host_vars/10.30.51.40.yaml | 70 ++ .../lf_inventory/host_vars/10.30.51.44.yaml | 29 + .../lf_inventory/host_vars/10.30.51.45.yaml | 29 + .../lf_inventory/host_vars/10.30.51.46.yaml | 29 + .../lf_inventory/host_vars/10.30.51.47.yaml | 29 + .../lf_inventory/host_vars/10.30.51.48.yaml | 29 + .../lf_inventory/host_vars/10.30.51.49.yaml | 30 + .../lf_inventory/host_vars/10.30.51.50.yaml | 69 ++ .../lf_inventory/host_vars/10.30.51.51.yaml | 69 ++ .../lf_inventory/host_vars/10.30.51.52.yaml | 29 + .../lf_inventory/host_vars/10.30.51.53.yaml | 29 + .../lf_inventory/host_vars/10.30.51.54.yaml | 29 + .../lf_inventory/host_vars/10.30.51.55.yaml | 29 + .../lf_inventory/host_vars/10.30.51.56.yaml | 29 + .../lf_inventory/host_vars/10.30.51.57.yaml | 29 + .../lf_inventory/host_vars/10.30.51.58.yaml | 29 + .../lf_inventory/host_vars/10.30.51.59.yaml | 29 + .../lf_inventory/host_vars/10.30.51.60.yaml | 29 + .../lf_inventory/host_vars/10.30.51.65.yaml | 67 ++ .../lf_inventory/host_vars/10.30.51.66.yaml | 67 ++ .../lf_inventory/host_vars/10.30.51.67.yaml | 67 ++ .../lf_inventory/host_vars/10.30.51.68.yaml | 67 ++ .../lf_inventory/host_vars/10.30.51.69.yaml | 37 ++ .../lf_inventory/host_vars/10.30.51.70.yaml | 72 +++ .../lf_inventory/host_vars/10.30.51.71.yaml | 72 +++ .../lf_inventory/host_vars/10.32.8.10.yaml | 30 + .../lf_inventory/host_vars/10.32.8.11.yaml | 29 + .../lf_inventory/host_vars/10.32.8.12.yaml | 29 + .../lf_inventory/host_vars/10.32.8.13.yaml | 29 + .../lf_inventory/host_vars/10.32.8.14.yaml | 70 ++ .../lf_inventory/host_vars/10.32.8.15.yaml | 70 ++ .../lf_inventory/host_vars/10.32.8.16.yaml | 70 ++ .../lf_inventory/host_vars/10.32.8.17.yaml | 70 ++ .../lf_inventory/host_vars/10.32.8.18.yaml | 29 + .../lf_inventory/host_vars/10.32.8.19.yaml | 29 + .../lf_inventory/host_vars/10.32.8.20.yaml | 29 + .../lf_inventory/host_vars/10.32.8.21.yaml | 29 + 
.../lf_inventory/host_vars/10.32.8.22.yaml | 29 + .../lf_inventory/host_vars/10.32.8.23.yaml | 29 + .../lf_inventory/host_vars/10.32.8.24.yaml | 26 + .../lf_inventory/host_vars/10.32.8.25.yaml | 26 + fdio.infra.ansible/inventories/lf_inventory/hosts | 74 +++ .../sample_inventory/group_vars/all.yaml | 5 + .../sample_inventory/host_vars/1.1.1.1.yaml | 17 + .../inventories/sample_inventory/hosts | 9 + fdio.infra.ansible/nomad.yaml | 32 + fdio.infra.ansible/roles/ab/defaults/main.yaml | 20 + fdio.infra.ansible/roles/ab/tasks/main.yaml | 18 + fdio.infra.ansible/roles/aws/defaults/main.yaml | 2 + fdio.infra.ansible/roles/aws/handlers/main.yaml | 15 + fdio.infra.ansible/roles/aws/tasks/main.yaml | 93 +++ .../roles/aws/tasks/ubuntu_bionic.yaml | 10 + .../roles/aws/tasks/ubuntu_focal.yaml | 10 + fdio.infra.ansible/roles/azure/defaults/main.yaml | 3 + fdio.infra.ansible/roles/azure/files/10-dtap.link | 4 + fdio.infra.ansible/roles/azure/handlers/main.yaml | 15 + fdio.infra.ansible/roles/azure/tasks/main.yaml | 38 ++ .../roles/baremetal/handlers/cimc.yaml | 74 +++ .../roles/baremetal/handlers/ipmi.yaml | 52 ++ .../roles/baremetal/handlers/main.yaml | 30 + .../roles/cadvisor/defaults/main.yaml | 24 + fdio.infra.ansible/roles/cadvisor/tasks/main.yaml | 39 ++ .../roles/calibration/defaults/main.yaml | 47 ++ .../roles/calibration/tasks/aarch64.yaml | 2 + .../roles/calibration/tasks/main.yaml | 89 +++ .../roles/calibration/tasks/x86_64.yaml | 35 + .../roles/cleanup/files/reset_vppdevice.sh | 113 ++++ .../roles/cleanup/tasks/clean_images.yaml | 36 ++ .../roles/cleanup/tasks/kill_containers.yaml | 42 ++ .../roles/cleanup/tasks/kill_process.yaml | 37 ++ fdio.infra.ansible/roles/cleanup/tasks/main.yaml | 43 ++ fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml | 22 + .../roles/cleanup/tasks/remove_package.yaml | 21 + fdio.infra.ansible/roles/cleanup/tasks/sut.yaml | 83 +++ fdio.infra.ansible/roles/cleanup/tasks/tg.yaml | 13 + .../roles/cleanup/tasks/vpp_device.yaml | 32 + fdio.infra.ansible/roles/common/defaults/main.yaml | 72 +++ fdio.infra.ansible/roles/common/handlers/main.yaml | 8 + fdio.infra.ansible/roles/common/tasks/main.yaml | 55 ++ fdio.infra.ansible/roles/consul/defaults/main.yaml | 110 ++++ fdio.infra.ansible/roles/consul/handlers/main.yaml | 23 + fdio.infra.ansible/roles/consul/meta/main.yaml | 9 + fdio.infra.ansible/roles/consul/tasks/main.yaml | 182 ++++++ .../roles/consul/templates/base.hcl.j2 | 43 ++ .../roles/consul/templates/consul.hcl.j2 | 12 + .../consul/templates/consul_systemd.service.j2 | 21 + .../roles/consul/templates/ports.hcl.j2 | 9 + .../roles/consul/templates/services.json.j2 | 13 + .../roles/consul/templates/telemetry.hcl.j2 | 3 + fdio.infra.ansible/roles/consul/vars/main.yaml | 5 + .../roles/csit_sut_image/files/Dockerfile | 166 +++++ .../roles/csit_sut_image/files/supervisord.conf | 24 + .../roles/csit_sut_image/tasks/main.yaml | 30 + fdio.infra.ansible/roles/docker/defaults/main.yaml | 38 ++ fdio.infra.ansible/roles/docker/handlers/main.yaml | 9 + fdio.infra.ansible/roles/docker/meta/main.yaml | 4 + fdio.infra.ansible/roles/docker/tasks/main.yaml | 82 +++ .../roles/docker/tasks/ubuntu_bionic.yaml | 30 + .../roles/docker/tasks/ubuntu_focal.yaml | 30 + .../roles/docker/templates/daemon.json.j2 | 1 + .../docker/templates/docker.service.proxy.http | 4 + .../docker/templates/docker.service.proxy.https | 4 + fdio.infra.ansible/roles/dpdk/defaults/main.yaml | 31 + .../roles/dpdk/files/dpdk-mlx5.patch | 19 + fdio.infra.ansible/roles/dpdk/tasks/main.yaml | 68 ++ 
fdio.infra.ansible/roles/iperf/defaults/main.yaml | 26 + fdio.infra.ansible/roles/iperf/tasks/main.yaml | 62 ++ .../jenkins_job_health_exporter/defaults/main.yaml | 35 + .../jenkins_job_health_exporter/handlers/main.yaml | 9 + .../jenkins_job_health_exporter/tasks/main.yaml | 38 ++ .../templates/jenkins-job-health-exporter.j2 | 16 + .../jenkins-job-health-exporter.service.j2 | 13 + fdio.infra.ansible/roles/kernel/defaults/main.yaml | 43 ++ .../roles/kernel/filter_plugins/main.py | 143 +++++ fdio.infra.ansible/roles/kernel/handlers/main.yaml | 8 + fdio.infra.ansible/roles/kernel/tasks/main.yaml | 9 + .../roles/kernel/tasks/ubuntu_bionic.yaml | 51 ++ .../roles/kernel/tasks/ubuntu_focal.yaml | 51 ++ .../roles/kernel_vm/files/initramfs_modules | 4 + .../roles/kernel_vm/files/initramfs_resume | 1 + fdio.infra.ansible/roles/kernel_vm/tasks/main.yaml | 92 +++ .../roles/kubernetes/defaults/main.yaml | 15 + .../roles/kubernetes/tasks/main.yaml | 14 + .../roles/kubernetes/tasks/ubuntu_bionic.yaml | 37 ++ .../roles/mellanox/defaults/main.yaml | 21 + fdio.infra.ansible/roles/mellanox/tasks/main.yaml | 67 ++ fdio.infra.ansible/roles/nomad/defaults/main.yaml | 105 +++ fdio.infra.ansible/roles/nomad/handlers/main.yaml | 10 + fdio.infra.ansible/roles/nomad/meta/main.yaml | 9 + fdio.infra.ansible/roles/nomad/tasks/main.yaml | 192 ++++++ .../roles/nomad/templates/base.hcl.j2 | 11 + .../roles/nomad/templates/client.hcl.j2 | 31 + .../roles/nomad/templates/custom.hcl.j2 | 5 + .../roles/nomad/templates/nomad_systemd.service.j2 | 21 + .../roles/nomad/templates/server.hcl.j2 | 16 + .../roles/nomad/templates/telemetry.hcl.j2 | 20 + .../roles/nomad/templates/tls.hcl.j2 | 12 + fdio.infra.ansible/roles/nomad/vars/main.yaml | 5 + .../roles/performance_tuning/defaults/main.yaml | 20 + .../roles/performance_tuning/files/cpufrequtils | 1 + .../files/disable-turbo-boost.service | 10 + .../roles/performance_tuning/files/irqbalance | 25 + .../performance_tuning/filter_plugins/main.py | 29 + .../roles/performance_tuning/handlers/main.yaml | 13 + .../roles/performance_tuning/tasks/main.yaml | 189 ++++++ .../performance_tuning/tasks/turbo_boost.yaml | 44 ++ .../roles/prometheus_exporter/defaults/main.yaml | 17 + .../roles/prometheus_exporter/files/blackbox.yml | 25 + .../roles/prometheus_exporter/handlers/main.yaml | 16 + .../roles/prometheus_exporter/tasks/main.yaml | 15 + .../prometheus_exporter/tasks/ubuntu_bionic.yaml | 33 + .../roles/python_env/defaults/main.yaml | 41 ++ .../roles/python_env/tasks/main.yaml | 82 +++ .../tg/files/csit-initialize-docker-tg.service | 12 + .../roles/tg/files/csit-initialize-docker-tg.sh | 58 ++ fdio.infra.ansible/roles/tg/handlers/main.yaml | 10 + fdio.infra.ansible/roles/tg/tasks/main.yaml | 30 + fdio.infra.ansible/roles/topology/tasks/main.yaml | 9 + .../topology/templates/topology_2n_aws_c5n.j2 | 56 ++ .../topology/templates/topology_3n_aws_c5n.j2 | 83 +++ .../topology/templates/topology_3n_azure_Fsv2.j2 | 82 +++ fdio.infra.ansible/roles/trex/defaults/main.yaml | 44 ++ fdio.infra.ansible/roles/trex/files/t-rex.patch | 548 ++++++++++++++++ .../roles/trex/tasks/deploy_block.yaml | 55 ++ fdio.infra.ansible/roles/trex/tasks/main.yaml | 24 + .../roles/user_add/defaults/main.yaml | 11 + .../roles/user_add/handlers/main.yaml | 7 + fdio.infra.ansible/roles/user_add/tasks/main.yaml | 48 ++ fdio.infra.ansible/roles/vpp/defaults/main.yaml | 36 ++ fdio.infra.ansible/roles/vpp/tasks/main.yaml | 27 + .../files/csit-initialize-vfs-default.sh | 22 + .../vpp_device/files/csit-initialize-vfs-tx2.sh | 34 + 
.../vpp_device/files/csit-initialize-vfs.service | 12 + .../roles/vpp_device/files/csit-initialize-vfs.sh | 73 +++ .../roles/vpp_device/handlers/main.yaml | 21 + .../roles/vpp_device/tasks/main.yaml | 92 +++ fdio.infra.ansible/site.yaml | 26 + fdio.infra.ansible/sut.yaml | 103 +++ fdio.infra.ansible/tg.yaml | 111 ++++ fdio.infra.ansible/vault.yml | 706 +++++++++++++++++++++ fdio.infra.ansible/vault_pass | 1 + fdio.infra.ansible/vpp_device.yaml | 40 ++ 206 files changed, 9033 insertions(+) create mode 100644 fdio.infra.ansible/.gitignore create mode 100644 fdio.infra.ansible/cloud_topology.yaml create mode 100644 fdio.infra.ansible/dev.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/group_vars/all.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.16.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.17.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.18.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.20.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.24.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.25.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.26.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.28.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.29.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.32.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.33.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.36.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.37.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.38.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.39.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.40.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.44.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.45.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.46.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.47.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.48.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.49.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.50.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.51.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.52.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.53.yaml create mode 100644 
fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.54.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.55.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.56.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.57.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.58.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.59.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.60.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.65.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.70.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.71.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.10.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.11.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.12.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.13.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.18.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.19.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.20.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.21.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.22.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.23.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.24.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.25.yaml create mode 100644 fdio.infra.ansible/inventories/lf_inventory/hosts create mode 100644 fdio.infra.ansible/inventories/sample_inventory/group_vars/all.yaml create mode 100644 fdio.infra.ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml create mode 100644 fdio.infra.ansible/inventories/sample_inventory/hosts create mode 100644 fdio.infra.ansible/nomad.yaml create mode 100644 fdio.infra.ansible/roles/ab/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/ab/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/aws/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/aws/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/aws/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/aws/tasks/ubuntu_bionic.yaml create mode 100644 fdio.infra.ansible/roles/aws/tasks/ubuntu_focal.yaml create mode 100644 
fdio.infra.ansible/roles/azure/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/azure/files/10-dtap.link create mode 100644 fdio.infra.ansible/roles/azure/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/azure/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/baremetal/handlers/cimc.yaml create mode 100644 fdio.infra.ansible/roles/baremetal/handlers/ipmi.yaml create mode 100644 fdio.infra.ansible/roles/baremetal/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/cadvisor/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/cadvisor/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/calibration/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/calibration/tasks/aarch64.yaml create mode 100644 fdio.infra.ansible/roles/calibration/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/calibration/tasks/x86_64.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/sut.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/tg.yaml create mode 100644 fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml create mode 100644 fdio.infra.ansible/roles/common/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/common/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/common/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/consul/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/consul/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/consul/meta/main.yaml create mode 100644 fdio.infra.ansible/roles/consul/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/consul/templates/base.hcl.j2 create mode 100644 fdio.infra.ansible/roles/consul/templates/consul.hcl.j2 create mode 100644 fdio.infra.ansible/roles/consul/templates/consul_systemd.service.j2 create mode 100644 fdio.infra.ansible/roles/consul/templates/ports.hcl.j2 create mode 100644 fdio.infra.ansible/roles/consul/templates/services.json.j2 create mode 100644 fdio.infra.ansible/roles/consul/templates/telemetry.hcl.j2 create mode 100644 fdio.infra.ansible/roles/consul/vars/main.yaml create mode 100644 fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile create mode 100644 fdio.infra.ansible/roles/csit_sut_image/files/supervisord.conf create mode 100644 fdio.infra.ansible/roles/csit_sut_image/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/docker/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/docker/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/docker/meta/main.yaml create mode 100644 fdio.infra.ansible/roles/docker/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/docker/tasks/ubuntu_bionic.yaml create mode 100644 fdio.infra.ansible/roles/docker/tasks/ubuntu_focal.yaml create mode 100644 fdio.infra.ansible/roles/docker/templates/daemon.json.j2 create mode 100644 fdio.infra.ansible/roles/docker/templates/docker.service.proxy.http create mode 100644 
fdio.infra.ansible/roles/docker/templates/docker.service.proxy.https create mode 100644 fdio.infra.ansible/roles/dpdk/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/dpdk/files/dpdk-mlx5.patch create mode 100644 fdio.infra.ansible/roles/dpdk/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/iperf/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/iperf/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/jenkins_job_health_exporter/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/jenkins_job_health_exporter/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/jenkins_job_health_exporter/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.j2 create mode 100644 fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.service.j2 create mode 100644 fdio.infra.ansible/roles/kernel/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/kernel/filter_plugins/main.py create mode 100644 fdio.infra.ansible/roles/kernel/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/kernel/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/kernel/tasks/ubuntu_bionic.yaml create mode 100644 fdio.infra.ansible/roles/kernel/tasks/ubuntu_focal.yaml create mode 100644 fdio.infra.ansible/roles/kernel_vm/files/initramfs_modules create mode 100644 fdio.infra.ansible/roles/kernel_vm/files/initramfs_resume create mode 100644 fdio.infra.ansible/roles/kernel_vm/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/kubernetes/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/kubernetes/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/kubernetes/tasks/ubuntu_bionic.yaml create mode 100644 fdio.infra.ansible/roles/mellanox/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/mellanox/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/nomad/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/nomad/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/nomad/meta/main.yaml create mode 100644 fdio.infra.ansible/roles/nomad/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/nomad/templates/base.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/client.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/custom.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/nomad_systemd.service.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/server.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/telemetry.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/templates/tls.hcl.j2 create mode 100644 fdio.infra.ansible/roles/nomad/vars/main.yaml create mode 100644 fdio.infra.ansible/roles/performance_tuning/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/performance_tuning/files/cpufrequtils create mode 100644 fdio.infra.ansible/roles/performance_tuning/files/disable-turbo-boost.service create mode 100644 fdio.infra.ansible/roles/performance_tuning/files/irqbalance create mode 100644 fdio.infra.ansible/roles/performance_tuning/filter_plugins/main.py create mode 100644 fdio.infra.ansible/roles/performance_tuning/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/performance_tuning/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/performance_tuning/tasks/turbo_boost.yaml create mode 100644 fdio.infra.ansible/roles/prometheus_exporter/defaults/main.yaml 
create mode 100644 fdio.infra.ansible/roles/prometheus_exporter/files/blackbox.yml create mode 100644 fdio.infra.ansible/roles/prometheus_exporter/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/prometheus_exporter/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/prometheus_exporter/tasks/ubuntu_bionic.yaml create mode 100644 fdio.infra.ansible/roles/python_env/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/python_env/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.service create mode 100755 fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.sh create mode 100644 fdio.infra.ansible/roles/tg/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/tg/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/topology/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/topology/templates/topology_2n_aws_c5n.j2 create mode 100644 fdio.infra.ansible/roles/topology/templates/topology_3n_aws_c5n.j2 create mode 100644 fdio.infra.ansible/roles/topology/templates/topology_3n_azure_Fsv2.j2 create mode 100644 fdio.infra.ansible/roles/trex/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/trex/files/t-rex.patch create mode 100644 fdio.infra.ansible/roles/trex/tasks/deploy_block.yaml create mode 100644 fdio.infra.ansible/roles/trex/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/user_add/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/user_add/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/user_add/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/vpp/defaults/main.yaml create mode 100644 fdio.infra.ansible/roles/vpp/tasks/main.yaml create mode 100644 fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh create mode 100644 fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-tx2.sh create mode 100644 fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.service create mode 100644 fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.sh create mode 100644 fdio.infra.ansible/roles/vpp_device/handlers/main.yaml create mode 100644 fdio.infra.ansible/roles/vpp_device/tasks/main.yaml create mode 100644 fdio.infra.ansible/site.yaml create mode 100644 fdio.infra.ansible/sut.yaml create mode 100644 fdio.infra.ansible/tg.yaml create mode 100644 fdio.infra.ansible/vault.yml create mode 100644 fdio.infra.ansible/vault_pass create mode 100644 fdio.infra.ansible/vpp_device.yaml diff --git a/fdio.infra.ansible/.gitignore b/fdio.infra.ansible/.gitignore new file mode 100644 index 0000000000..bed1e33b88 --- /dev/null +++ b/fdio.infra.ansible/.gitignore @@ -0,0 +1 @@ +site.retry \ No newline at end of file diff --git a/fdio.infra.ansible/cloud_topology.yaml b/fdio.infra.ansible/cloud_topology.yaml new file mode 100644 index 0000000000..083a3bd368 --- /dev/null +++ b/fdio.infra.ansible/cloud_topology.yaml @@ -0,0 +1,9 @@ +--- +# file: cloud_topology.yaml + +- hosts: localhost + gather_facts: false + roles: + - role: topology + tags: topology + diff --git a/fdio.infra.ansible/dev.yaml b/fdio.infra.ansible/dev.yaml new file mode 100644 index 0000000000..6f6d2a7cb5 --- /dev/null +++ b/fdio.infra.ansible/dev.yaml @@ -0,0 +1,18 @@ +--- +# file: dev.yaml + +- hosts: dev + remote_user: testuser + become: yes + become_user: root + gather_facts: false + pre_tasks: + - name: Gathering Facts + gather_facts: + tags: + - always + roles: + - role: user_add + tags: user_add + - role:
docker + tags: docker diff --git a/fdio.infra.ansible/inventories/lf_inventory/group_vars/all.yaml b/fdio.infra.ansible/inventories/lf_inventory/group_vars/all.yaml new file mode 100644 index 0000000000..0756621eef --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/group_vars/all.yaml @@ -0,0 +1,5 @@ +--- +# file: lf_inventory/group_vars/all.yaml + +# Ansible interpreter (for PIP) +ansible_python_interpreter: "/usr/bin/python3" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.16.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.16.yaml new file mode 100644 index 0000000000..660e33ee53 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.16.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.16.yaml + +hostname: "t1-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.16' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.17.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.17.yaml new file mode 100644 index 0000000000..48e5083332 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.17.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.17.yaml + +hostname: "t1-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.17' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.18.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.18.yaml new file mode 100644 index 0000000000..f823ed6406 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.18.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.18.yaml + +hostname: "t1-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.18' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.20.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.20.yaml new file mode 100644 index 0000000000..68af6e81f7 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.20.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.20.yaml + +hostname: "t2-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: 
"on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.20' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml new file mode 100644 index 0000000000..0d43874ae3 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.21.yaml + +hostname: "t2-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.21' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml new file mode 100644 index 0000000000..797606a353 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.22.yaml + +hostname: "t2-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.22' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.24.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.24.yaml new file mode 100644 index 0000000000..25bb536f81 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.24.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.24.yaml + +hostname: "t3-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.24' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.25.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.25.yaml new file mode 100644 index 0000000000..db02aa7816 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.25.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.25.yaml + +hostname: "t3-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: 
"disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.25' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.26.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.26.yaml new file mode 100644 index 0000000000..0e97bd0fd2 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.26.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.26.yaml + +hostname: "t3-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-17,19-35" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-17,19-35" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-17,19-35" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,18" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_cimc_hostname: '10.30.50.26' +cpu_microarchitecture: "haswell" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.28.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.28.yaml new file mode 100644 index 0000000000..bf1da2a759 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.28.yaml @@ -0,0 +1,64 @@ +--- +# file: host_vars/10.30.51.28.yaml + +hostname: "s41-nomad" +inventory_cimc_hostname: "10.30.50.28" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.29.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.29.yaml new file mode 100644 index 0000000000..5b3a1725b3 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.29.yaml @@ -0,0 +1,63 @@ +--- +# file: host_vars/10.30.51.29.yaml + +hostname: "s39-nomad" +inventory_cimc_hostname: "10.30.50.29" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v1_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v1_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v1_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml new file mode 100644 index 0000000000..13306a74a9 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml @@ -0,0 +1,65 @@ +--- +# file: host_vars/10.30.51.30.yaml + +hostname: "s40-nomad" +inventory_cimc_hostname: "10.30.50.30" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "both" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_retry_servers: [ "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "both" +consul_retry_servers: + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.32.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.32.yaml new file mode 100644 index 0000000000..a52008f023 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.32.yaml @@ -0,0 +1,64 @@ +--- +# file: host_vars/10.30.51.32.yaml + +hostname: "s42-nomad" +inventory_cimc_hostname: "10.30.50.32" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "both" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" +nomad_retry_servers: [ "10.30.51.33", "10.30.51.30" ] +nomad_servers: [ "10.30.51.32:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "both" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.33.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.33.yaml new file mode 100644 index 0000000000..7ab2f823cb --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.33.yaml @@ -0,0 +1,64 @@ +--- +# file: host_vars/10.30.51.33.yaml + +hostname: "s43-nomad" +inventory_cimc_hostname: "10.30.50.33" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "both" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" +nomad_retry_servers: [ "10.30.51.32", "10.30.51.30" ] +nomad_servers: [ "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "both" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml new file mode 100644 index 0000000000..a45cda8e86 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml @@ -0,0 +1,65 @@ +--- +# file: host_vars/10.30.51.34.yaml + +hostname: "s44-nomad" +inventory_cimc_hostname: "10.30.50.34" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" +nomad_retry_servers: [ "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml new file mode 100644 index 0000000000..f609a839c4 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml @@ -0,0 +1,65 @@ +--- +# file: host_vars/10.30.51.35.yaml + +hostname: "s45-nomad" +inventory_cimc_hostname: "10.30.50.35" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. 
+nomad_certificates: + - src: "{{ vault_nomad_v2_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v2_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v2_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: false + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" +nomad_retry_servers: [ "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.33:4647" ] + +# Consul settings. +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.36.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.36.yaml new file mode 100644 index 0000000000..f57c8df45f --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.36.yaml @@ -0,0 +1,21 @@ +--- +# file: host_vars/10.30.51.36.yaml + +hostname: "s17-t33-sut1" +grub: + audit: "0" + intel_iommu: "on" + isolcpus: "1-15,17-31,33-47,49-63" + nmi_watchdog: "0" + nohz_full: "1-15,17-31,33-47,49-63" + nosoftlockup: True + processor.max_cstate: "1" + rcu_nocbs: "1-15,17-31,33-47,49-63" +sysctl: + kernel: + watchdog_cpumask: "0,16,32,48" + vm: + nr_hugepages: 32768 + +inventory_ipmi_hostname: '10.30.50.36' +cpu_microarchitecture: "taishan" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.37.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.37.yaml new file mode 100644 index 0000000000..90fe27e275 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.37.yaml @@ -0,0 +1,21 @@ +--- +# file: host_vars/10.30.51.37.yaml + +hostname: "s18-t33-sut2" +grub: + audit: "0" + intel_iommu: "on" + isolcpus: "1-15,17-31,33-47,49-63" + nmi_watchdog: "0" + nohz_full: "1-15,17-31,33-47,49-63" + nosoftlockup: True + processor.max_cstate: "1" + rcu_nocbs: "1-15,17-31,33-47,49-63" +sysctl: + kernel: + watchdog_cpumask: "0,16,32,48" + vm: + nr_hugepages: 32768 + +inventory_ipmi_hostname: '10.30.50.37' +cpu_microarchitecture: "taishan" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.38.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.38.yaml new file mode 100644 index 0000000000..77fee7377b --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.38.yaml @@ -0,0 +1,24 @@ +--- +# file: host_vars/10.30.51.38.yaml + +hostname: "fdio-marvell-dev" +inventory_ipmi_hostname: "10.30.50.38" +cpu_microarchitecture: "thunderx" + +# User management. 
+users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" + - username: vppdev + groups: [adm, sudo] + password: "$6$C5R6nFQlJ9Vu$l.Q6BDOMSgUFQv5OQw4viZVz8LCHziubXAwThaJXo/xVaAkqTGupPTvdMlK5r4ArDvDMPgD0AeO8/L.ciPl.g." diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.39.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.39.yaml new file mode 100644 index 0000000000..da66a5e293 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.39.yaml @@ -0,0 +1,70 @@ +--- +# file: host_vars/10.30.51.39.yaml + +hostname: "s53-nomad" +inventory_ipmi_hostname: "10.30.50.39" +cpu_microarchitecture: "thunderx" + +# User management. 
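+# Each entry below is consumed by the user management role. A sketch of the
+# equivalent stock-module tasks (assumption: the role wraps
+# ansible.builtin.user and ansible.posix.authorized_key; passwords are
+# pre-hashed SHA-512 crypt strings, never plaintext):
+#
+#   - ansible.builtin.user:
+#       name: "{{ item.username }}"
+#       groups: "{{ item.groups }}"
+#       password: "{{ item.password }}"
+#   - ansible.posix.authorized_key:
+#       user: "{{ item.username }}"
+#       key: "{{ item.ssh_key | join('\n') }}"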
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+  - "10.32.8.14"
+  - "10.32.8.15"
+  - "10.32.8.16"
+
+# Docker daemon settings.
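+# docker_daemon is rendered into /etc/docker/daemon.json by the docker role
+# (templates/daemon.json.j2). A sketch of the resulting file, assuming
+# ansible_hostname resolves to "s53-nomad":
+#
+#   {
+#     "dns": ["172.17.0.1"],
+#     "dns-opts": [],
+#     "dns-search": ["s53-nomad"]
+#   }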
+docker_daemon:
+  # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+  dns: [ "172.17.0.1" ]
+  dns-opts: []
+  dns-search: [ "{{ ansible_hostname }}" ]
\ No newline at end of file
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.40.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.40.yaml
new file mode 100644
index 0000000000..58839c8365
--- /dev/null
+++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.40.yaml
@@ -0,0 +1,70 @@
+---
+# file: host_vars/10.30.51.40.yaml
+
+hostname: "s54-nomad"
+inventory_ipmi_hostname: "10.30.50.40"
+cpu_microarchitecture: "thunderx"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
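+# A sketch of the agent fragment these variables drive (illustrative only;
+# the real templates are roles/consul/templates/*.hcl.j2). consul_encrypt is
+# the gossip encryption key, and consul_retry_servers feeds retry_join:
+#
+#   datacenter = "yul1"
+#   node_name  = "s54-nomad"
+#   encrypt    = "<gossip key>"
+#   retry_join = ["10.30.51.30", "10.30.51.32", "10.30.51.33", ...]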
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + - "10.32.8.14" + - "10.32.8.15" + - "10.32.8.16" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.44.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.44.yaml new file mode 100644 index 0000000000..2b06831186 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.44.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.44.yaml + +hostname: "s3-t21-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.41' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.45.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.45.yaml new file mode 100644 index 0000000000..9818f826c9 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.45.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.45.yaml + +hostname: "s4-t21-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.42' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.46.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.46.yaml new file mode 100644 index 0000000000..24be5e2e54 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.46.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.46.yaml + +hostname: "s11-t31-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.43' 
+cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.47.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.47.yaml new file mode 100644 index 0000000000..7b59ff1606 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.47.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.47.yaml + +hostname: "s12-t31-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.44' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.48.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.48.yaml new file mode 100644 index 0000000000..d69fd03edc --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.48.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.48.yaml + +hostname: "s13-t31-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.45' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.49.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.49.yaml new file mode 100644 index 0000000000..9e6d17fb8f --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.49.yaml @@ -0,0 +1,30 @@ +--- +# file: host_vars/10.30.51.49.yaml + +hostname: "s19-t33t211-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 16384 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.46' +cpu_microarchitecture: "skylake" +docker_tg: true diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.50.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.50.yaml new file mode 100644 index 0000000000..f3b8886a72 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.50.yaml @@ -0,0 +1,69 @@ +--- +# file: host_vars/10.30.51.50.yaml + +hostname: "s1-t11-sut1" +grub: + hugepagesz: "2M" + nr_hugepages: 32768 +inventory_ipmi_hostname: "10.30.50.47" +cpu_microarchitecture: "skylake" + +# User management. 
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csit"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker settings.
+docker_daemon:
+  # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+  default-shm-size: "1073741824"
+  dns: [ "172.17.0.1" ]
+  dns-opts: []
+  dns-search: [ "{{ ansible_hostname }}" ]
+  host: [ "172.17.0.1:/var/run/docker.sock" ]
\ No newline at end of file
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.51.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.51.yaml
new file mode 100644
index 0000000000..019cd5a968
--- /dev/null
+++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.51.yaml
@@ -0,0 +1,69 @@
+---
+# file: host_vars/10.30.51.51.yaml
+
+hostname: "s2-t12-sut1"
+grub:
+  hugepagesz: "2M"
+  nr_hugepages: 32768
+inventory_ipmi_hostname: "10.30.50.48"
+cpu_microarchitecture: "skylake"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csit"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker settings.
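+# Note: default-shm-size is in bytes, so "1073741824" = 2^30 B = 1 GiB of
+# /dev/shm per container. The host entry below additionally exposes a daemon
+# endpoint (as dockerd's -H/--host option would); the exact daemon.json key
+# the template emits is defined by the docker role, not here.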
+docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + default-shm-size: "1073741824" + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ ansible_hostname }}" ] + host: [ "172.17.0.1:/var/run/docker.sock" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.52.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.52.yaml new file mode 100644 index 0000000000..4a4ccc0d91 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.52.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.52.yaml + +hostname: "s5-t22-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.49' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.53.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.53.yaml new file mode 100644 index 0000000000..f0a50bfb71 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.53.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.53.yaml + +hostname: "s6-t22-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.50' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.54.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.54.yaml new file mode 100644 index 0000000000..b51eed468f --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.54.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.54.yaml + +hostname: "s7-t23-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.51' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.55.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.55.yaml new file mode 100644 index 0000000000..4b144fa69b --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.55.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.55.yaml + +hostname: "s8-t23-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + 
intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.52' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.56.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.56.yaml new file mode 100644 index 0000000000..613c9b110c --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.56.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.56.yaml + +hostname: "s9-t24-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.53' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.57.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.57.yaml new file mode 100644 index 0000000000..52ec3d9911 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.57.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.57.yaml + +hostname: "s10-t24-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.54' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.58.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.58.yaml new file mode 100644 index 0000000000..8971b4a7bb --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.58.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.58.yaml + +hostname: "s14-t32-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.55' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.59.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.59.yaml new file mode 100644 index 0000000000..ed39581e30 --- /dev/null +++ 
b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.59.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.59.yaml + +hostname: "s15-t32-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 36864 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.56' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.60.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.60.yaml new file mode 100644 index 0000000000..1e3a49cf1d --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.60.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.30.51.60.yaml + +hostname: "s16-t32-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.50.57' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.65.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.65.yaml new file mode 100644 index 0000000000..76b330ae2e --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.65.yaml @@ -0,0 +1,67 @@ +--- +# file: host_vars/10.30.51.65.yaml + +hostname: "s52-nomad" +inventory_ipmi_hostname: "10.30.50.65" +cpu_microarchitecture: "thunderx" + +# User management. 
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker daemon settings.
+docker_daemon:
+  # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+  dns: [ "172.17.0.1" ]
+  dns-opts: []
+  dns-search: [ "{{ ansible_hostname }}" ]
\ No newline at end of file
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml
new file mode 100644
index 0000000000..5223e4ba11
--- /dev/null
+++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml
@@ -0,0 +1,67 @@
+---
+# file: host_vars/10.30.51.66.yaml
+
+hostname: "s51-nomad"
+inventory_ipmi_hostname: "10.30.50.66"
+cpu_microarchitecture: "thunderx"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml new file mode 100644 index 0000000000..da9ed6da49 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml @@ -0,0 +1,67 @@ +--- +# file: host_vars/10.30.51.67.yaml + +hostname: "s49-nomad" +inventory_ipmi_hostname: "10.30.50.67" +cpu_microarchitecture: "thunderx" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" + +# Nomad settings. 
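+# nomad_options maps onto the Nomad client "options" block; a sketch of the
+# equivalent agent config (illustrative; the key names are stock Nomad
+# client options of that era):
+#
+#   client {
+#     options {
+#       "driver.raw_exec.enable"    = "1"
+#       "driver.whitelist"          = "docker,raw_exec,exec"
+#       "docker.privileged.enabled" = "true"
+#       "docker.volumes.enabled"    = "true"
+#     }
+#   }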
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker daemon settings.
+docker_daemon:
+  # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+  dns: [ "172.17.0.1" ]
+  dns-opts: []
+  dns-search: [ "{{ ansible_hostname }}" ]
\ No newline at end of file
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml
new file mode 100644
index 0000000000..0295af4dbd
--- /dev/null
+++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml
@@ -0,0 +1,67 @@
+---
+# file: host_vars/10.30.51.68.yaml
+
+hostname: "s50-nomad"
+inventory_ipmi_hostname: "10.30.50.68"
+cpu_microarchitecture: "thunderx"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker daemon settings.
+docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml new file mode 100644 index 0000000000..264c62cf38 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml @@ -0,0 +1,37 @@ +--- +# file: host_vars/10.30.51.69.yaml + +hostname: "s27-t211-sut1" +grub: + audit: "0" + intel_iommu: "on" + isolcpus: "1-27,29-55" + nmi_watchdog: "0" + nohz_full: "1-27,29-55" + nosoftlockup: True + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55" +sysctl: + kernel: + watchdog_cpumask: "0,28" + vm: + nr_hugepages: 57344 + max_map_count: 20000 + +inventory_ipmi_hostname: "10.30.50.69" +cpu_microarchitecture: "thunderx2" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.70.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.70.yaml new file mode 100644 index 0000000000..caf80df9ca --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.70.yaml @@ -0,0 +1,72 @@ +--- +# file: host_vars/10.30.51.70.yaml + +hostname: "s55-t36-sut1" +inventory_ipmi_hostname: "10.30.50.70" +vfs_data_file: "csit-initialize-vfs-tx2.sh" +grub: + hugepagesz: "2M" + nr_hugepages: 32768 +cpu_microarchitecture: "thunderx2" + +# User management. 
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csitarm"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
+
+# Docker settings.
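+# This host pins storage-driver to "overlay2" (the recommended driver on
+# current kernels). A quick post-deployment check, e.g.:
+#
+#   $ docker info --format '{{ .Driver }}'
+#   overlay2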
+docker_daemon:
+  # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+  dns: [ "172.17.0.1" ]
+  dns-opts: []
+  dns-search: [ "{{ ansible_hostname }}" ]
+  storage-driver: "overlay2"
\ No newline at end of file
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.71.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.71.yaml
new file mode 100644
index 0000000000..1be446f288
--- /dev/null
+++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.71.yaml
@@ -0,0 +1,72 @@
+---
+# file: host_vars/10.30.51.71.yaml
+
+hostname: "s56-t37-sut1"
+inventory_ipmi_hostname: "10.30.50.71"
+vfs_data_file: "csit-initialize-vfs-tx2.sh"
+grub:
+  hugepagesz: "2M"
+  nr_hugepages: 32768
+cpu_microarchitecture: "thunderx2"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v1_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v1_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v1_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csitarm"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
+
+# Consul settings.
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ ansible_hostname }}" ] + storage-driver: "overlay2" \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.10.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.10.yaml new file mode 100644 index 0000000000..05f1a19576 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.10.yaml @@ -0,0 +1,30 @@ +--- +# file: host_vars/10.32.8.10.yaml + +hostname: "s28-t26t35-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 16384 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.10' +cpu_microarchitecture: "skylake" +docker_tg: true diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.11.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.11.yaml new file mode 100644 index 0000000000..c6cb68cb5d --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.11.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.11.yaml + +hostname: "s29-t26-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-5" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-5" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-5" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.11' +cpu_microarchitecture: "denverton" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.12.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.12.yaml new file mode 100644 index 0000000000..5d350647a9 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.12.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.12.yaml + +hostname: "s30-t35-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-5" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-5" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-5" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.12' +cpu_microarchitecture: "denverton" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.13.yaml 
b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.13.yaml new file mode 100644 index 0000000000..aff600265c --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.13.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.13.yaml + +hostname: "s31-t35-sut2" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-5" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-5" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-5" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.13' +cpu_microarchitecture: "denverton" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml new file mode 100644 index 0000000000..c609cc7875 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml @@ -0,0 +1,70 @@ +--- +# file: host_vars/10.32.8.14.yaml + +hostname: "s46-nomad" +inventory_ipmi_hostname: "10.30.55.14" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. +nomad_certificates: + - src: "{{ vault_nomad_v3_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v3_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v3_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: true + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647", "10.30.51.30:4647" ] +nomad_volumes: + - name: "prod-volume-data1-1" + path: "/data" + read_only: false + +# Consul settings. 
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml new file mode 100644 index 0000000000..c9825c7830 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml @@ -0,0 +1,70 @@ +--- +# file: host_vars/10.32.8.15.yaml + +hostname: "s47-nomad" +inventory_ipmi_hostname: "10.30.55.15" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. +nomad_certificates: + - src: "{{ vault_nomad_v3_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v3_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v3_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: true + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647", "10.30.51.30:4647" ] +nomad_volumes: + - name: "prod-volume-data1-1" + path: "/data" + read_only: false + +# Consul settings.
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml new file mode 100644 index 0000000000..1d952556f9 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml @@ -0,0 +1,70 @@ +--- +# file: host_vars/10.32.8.16.yaml + +hostname: "s48-nomad" +inventory_ipmi_hostname: "10.30.55.16" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. +nomad_certificates: + - src: "{{ vault_nomad_v3_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v3_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v3_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: true + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647", "10.30.51.30:4647" ] +nomad_volumes: + - name: "prod-volume-data1-1" + path: "/data" + read_only: false + +# Consul settings.
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml new file mode 100644 index 0000000000..2feac858d9 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml @@ -0,0 +1,70 @@ +--- +# file: host_vars/10.32.8.17.yaml + +hostname: "s57-nomad" +inventory_ipmi_hostname: "10.30.55.17" + +# User management. +users: + - username: localadmin + groups: [adm, sudo] + password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + - username: testuser + groups: [adm, sudo] + password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1" + ssh_key: + - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com" + +# Nomad settings. +nomad_certificates: + - src: "{{ vault_nomad_v3_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_v3_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_v3_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_datacenter: "yul1" +nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +nomad_name: "{{ hostname }}-{{ ansible_architecture }}" +nomad_node_role: "client" +nomad_node_class: "builder" +nomad_options: + driver.raw_exec.enable: 1 + docker.cleanup.image: true + docker.privileged.enabled: true + docker.volumes.enabled: true + driver.whitelist: "docker,raw_exec,exec" + fingerprint.network.disallow_link_local: true +nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ] +nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647", "10.30.51.30:4647" ] +nomad_volumes: + - name: "prod-volume-data1-1" + path: "/data" + read_only: false + +# Consul settings.
+consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" +consul_datacenter: "yul1" +consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ==" +consul_node_name: "{{ hostname }}" +consul_node_role: "client" +consul_retry_servers: + - "10.30.51.30" + - "10.30.51.32" + - "10.30.51.33" + +# Docker daemon settings. +docker_daemon: + # https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file + dns: [ "172.17.0.1" ] + dns-opts: [] + dns-search: [ "{{ansible_hostname}}" ] \ No newline at end of file diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.18.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.18.yaml new file mode 100644 index 0000000000..6fe0dbc14d --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.18.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.18.yaml + +hostname: "s33-t27-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-23,25-47,49-71,73-95" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-23,25-47,49-71,73-95" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-23,25-47,49-71,73-95" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,24,48,72" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.18' +cpu_microarchitecture: "cascadelake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.19.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.19.yaml new file mode 100644 index 0000000000..f963d4b888 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.19.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.19.yaml + +hostname: "s34-t27-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.19' +cpu_microarchitecture: "cascadelake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.20.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.20.yaml new file mode 100644 index 0000000000..6913550686 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.20.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.20.yaml + +hostname: "s35-t28-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-23,25-47,49-71,73-95" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-23,25-47,49-71,73-95" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-23,25-47,49-71,73-95" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,24,48,72" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.20' +cpu_microarchitecture: "cascadelake" diff --git 
a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.21.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.21.yaml new file mode 100644 index 0000000000..98cffb3392 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.21.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.21.yaml + +hostname: "s36-t28-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.21' +cpu_microarchitecture: "cascadelake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.22.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.22.yaml new file mode 100644 index 0000000000..f58b2ce07d --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.22.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.22.yaml + +hostname: "s37-t29-sut1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-23,25-47,49-71,73-95" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-23,25-47,49-71,73-95" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-23,25-47,49-71,73-95" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,24,48,72" + vm: + nr_hugepages: 65536 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.22' +cpu_microarchitecture: "cascadelake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.23.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.23.yaml new file mode 100644 index 0000000000..364ab8b295 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.23.yaml @@ -0,0 +1,29 @@ +--- +# file: host_vars/10.32.8.23.yaml + +hostname: "s38-t29-tg1" +grub: + audit: "0" + hpet: "disable" + intel_idle.max_cstate: "1" + intel_iommu: "on" + intel_pstate: "disable" + iommu: "pt" + isolcpus: "1-27,29-55,57-83,85-111" + mce: "off" + nmi_watchdog: "0" + nohz_full: "1-27,29-55,57-83,85-111" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "1" + rcu_nocbs: "1-27,29-55,57-83,85-111" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,28,56,84" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: '10.30.55.23' +cpu_microarchitecture: "cascadelake" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.24.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.24.yaml new file mode 100644 index 0000000000..dc8a060cc0 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.24.yaml @@ -0,0 +1,26 @@ +--- +# file: host_vars/10.32.8.24.yaml + +hostname: "s60-t210-sut1" +grub: + amd_iommu: "on" + audit: "0" + hpet: "disable" + iommu: "pt" + isolcpus: "1-15,17-31,33-47,49-63" + nmi_watchdog: "0" + nohz_full: "off" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "0" + rcu_nocbs: "1-15,17-31,33-47,49-63" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,16,32,48" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + 
+inventory_ipmi_hostname: "10.30.55.24" +cpu_microarchitecture: "epyc" diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.25.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.25.yaml new file mode 100644 index 0000000000..8c99bf3cae --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.32.8.25.yaml @@ -0,0 +1,26 @@ +--- +# file: host_vars/10.32.8.25.yaml + +hostname: "s61-t210-tg1" +grub: + amd_iommu: "on" + audit: "0" + hpet: "disable" + iommu: "pt" + isolcpus: "1-15,17-31,33-47,49-63" + nmi_watchdog: "0" + nohz_full: "off" + nosoftlockup: True + numa_balancing: "disable" + processor.max_cstate: "0" + rcu_nocbs: "1-15,17-31,33-47,49-63" + tsc: "reliable" +sysctl: + kernel: + watchdog_cpumask: "0,16,32,48" + vm: + nr_hugepages: 8192 + max_map_count: 20000 + +inventory_ipmi_hostname: "10.30.55.25" +cpu_microarchitecture: "epyc" diff --git a/fdio.infra.ansible/inventories/lf_inventory/hosts b/fdio.infra.ansible/inventories/lf_inventory/hosts new file mode 100644 index 0000000000..cc0e0b3986 --- /dev/null +++ b/fdio.infra.ansible/inventories/lf_inventory/hosts @@ -0,0 +1,74 @@ +all: + children: + tg: + hosts: + 10.30.51.16: #t1-tg1 - haswell + 10.30.51.20: #t2-tg1 - haswell + 10.30.51.24: #t3-tg1 - haswell + 10.30.51.45: #s4-t21-tg1 - skylake + 10.30.51.48: #s13-t31-tg1 - skylake + 10.30.51.49: #s19-t33t34-tg1 - skylake + 10.30.51.53: #s6-t22-tg1 - skylake + 10.30.51.55: #s8-t23-tg1 - skylake + 10.30.51.57: #s10-t24-tg1 - skylake + 10.30.51.60: #s16-t32-tg1 - skylake + 10.32.8.10: #s28-t26t35-tg1 - skylake + 10.32.8.19: #s34-t27-tg1 - cascadelake + 10.32.8.21: #s36-t28-tg1 - cascadelake + 10.32.8.23: #s38-t29-tg1 - cascadelake + 10.32.8.25: #s61-t210-tg1 - epyc + sut: + hosts: + 10.30.51.17: #t1-sut1 - haswell + 10.30.51.18: #t1-sut2 - haswell + 10.30.51.21: #t2-sut1 - haswell + 10.30.51.22: #t2-sut2 - haswell + 10.30.51.25: #t3-sut1 - haswell + 10.30.51.26: #t3-sut2 - haswell + 10.30.51.36: #s17-t33-sut1 - taishan + 10.30.51.37: #s18-t33-sut2 - taishan + 10.30.51.44: #s3-t21-sut1 - skylake + 10.30.51.46: #s11-t31-sut1 - skylake + 10.30.51.47: #s12-t31-sut2 - skylake + 10.30.51.52: #s5-t22-sut1 - skylake + 10.30.51.54: #s7-t23-sut1 - skylake + 10.30.51.56: #s9-t24-sut1 - skylake + 10.30.51.58: #s14-t32-sut1 - skylake + 10.30.51.59: #s15-t32-sut2 - skylake + 10.32.8.11: #s29-t26-sut1 - denverton + 10.32.8.12: #s30-t35-sut1 - denverton + 10.32.8.13: #s31-t35-sut2 - denverton + 10.32.8.18: #s33-t27-sut1 - cascadelake + 10.32.8.20: #s35-t28-sut1 - cascadelake + 10.32.8.22: #s37-t29-sut1 - cascadelake + 10.32.8.24: #s60-t210-sut1 - epyc + 10.30.51.69: #s27-t34-sut1 - thunderx2 9975 + vpp_device: + # Note: vpp_device hosts are also nomad client hosts + hosts: + 10.30.51.50: #s1-t11-sut1 - skylake + 10.30.51.51: #s2-t12-sut1 - skylake + 10.30.51.70: #s55-t36-sut1 - thunderx2 9980 + 10.30.51.71: #s56-t37-sut1 - thunderx2 9980 + nomad: + hosts: + 10.30.51.28: #s41-nomad - haswell + 10.30.51.29: #s39-nomad - haswell + 10.30.51.30: #s40-nomad - haswell + 10.30.51.32: #s42-nomad - ivy bridge + 10.30.51.33: #s43-nomad - ivy bridge + 10.30.51.34: #s44-nomad - ivy bridge + 10.30.51.35: #s45-nomad - ivy bridge + 10.32.8.14: #s46-nomad - skylake + 10.32.8.15: #s47-nomad - skylake + 10.32.8.16: #s48-nomad - skylake + 10.32.8.17: #s57-nomad - skylake + 10.30.51.39: #s53-nomad - thunderx 88xx + 10.30.51.40: #s54-nomad - thunderx 88xx + 10.30.51.65: #s52-nomad - thunderx 88xx + 10.30.51.66: #s51-nomad - thunderx 88xx + 10.30.51.67: #s49-nomad - 
thunderx 88xx + 10.30.51.68: #s50-nomad - thunderx 88xx + dev: + hosts: + 10.30.51.38: #fdio-marvell-dev - thunderx 88xx diff --git a/fdio.infra.ansible/inventories/sample_inventory/group_vars/all.yaml b/fdio.infra.ansible/inventories/sample_inventory/group_vars/all.yaml new file mode 100644 index 0000000000..f9938d20da --- /dev/null +++ b/fdio.infra.ansible/inventories/sample_inventory/group_vars/all.yaml @@ -0,0 +1,5 @@ +--- +# file: sample_inventory/group_vars/all.yaml + +# Ansible interpreter (for PIP) +ansible_python_interpreter: "python3" diff --git a/fdio.infra.ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml b/fdio.infra.ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml new file mode 100644 index 0000000000..fc4a8af1a8 --- /dev/null +++ b/fdio.infra.ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml @@ -0,0 +1,17 @@ +--- +# file: host_vars/x.x.x.x.yaml + +hostname: 't1-tg1' +grub: + - isolcpus: '1-27,29-55,57-83,85-111' + - nohz_full: '1-27,29-55,57-83,85-111' + - rcu_nocbs: '1-27,29-55,57-83,85-111' +sysctl: + - kernel: + - watchdog_cpumask: '0,28,56,84' + - vm: + - nr_hugepages: 4096 + - max_map_count: 20000 + +inventory_ipmi_hostname: 'x.x.x.x' +cpu_microarchitecture: "skylake" diff --git a/fdio.infra.ansible/inventories/sample_inventory/hosts b/fdio.infra.ansible/inventories/sample_inventory/hosts new file mode 100644 index 0000000000..e8e1439db4 --- /dev/null +++ b/fdio.infra.ansible/inventories/sample_inventory/hosts @@ -0,0 +1,9 @@ +all: + children: + tg: + hosts: + 1.1.1.1: #t1-tg + sut: + hosts: + 2.2.2.2: #t1-sut1 + 3.3.3.3: #t1-sut2 diff --git a/fdio.infra.ansible/nomad.yaml b/fdio.infra.ansible/nomad.yaml new file mode 100644 index 0000000000..ae9de1e524 --- /dev/null +++ b/fdio.infra.ansible/nomad.yaml @@ -0,0 +1,32 @@ +--- +# file: nomad.yaml + +- hosts: nomad + remote_user: testuser + become: yes + become_user: root + gather_facts: false + pre_tasks: + - name: Gathering Facts + gather_facts: + tags: + - always + roles: + - role: user_add + tags: user_add + - role: baremetal + tags: baremetal + - role: docker + tags: docker + - role: nomad + tags: nomad + - role: consul + tags: consul + - role: prometheus_exporter + tags: prometheus_exporter + - role: jenkins_job_health_exporter + tags: jenkins_job_health_exporter + - role: cadvisor + tags: cadvisor + - role: cleanup + tags: cleanup \ No newline at end of file diff --git a/fdio.infra.ansible/roles/ab/defaults/main.yaml b/fdio.infra.ansible/roles/ab/defaults/main.yaml new file mode 100644 index 0000000000..45b80be42d --- /dev/null +++ b/fdio.infra.ansible/roles/ab/defaults/main.yaml @@ -0,0 +1,20 @@ +--- +# file: roles/ab/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + bionic: + - "apache2-utils" + focal: + - "apache2-utils" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] diff --git a/fdio.infra.ansible/roles/ab/tasks/main.yaml b/fdio.infra.ansible/roles/ab/tasks/main.yaml new file mode 100644 index 0000000000..37e702e6df --- /dev/null +++ b/fdio.infra.ansible/roles/ab/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +# file: roles/ab/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - ab-inst-prerequisites + +- name: Inst - Apache ab tools + package: + name: "{{ packages | flatten(levels=1)
}}" + state: present + tags: + - ab-inst \ No newline at end of file diff --git a/fdio.infra.ansible/roles/aws/defaults/main.yaml b/fdio.infra.ansible/roles/aws/defaults/main.yaml new file mode 100644 index 0000000000..d4ea91afd4 --- /dev/null +++ b/fdio.infra.ansible/roles/aws/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +# file: roles/aws/defaults/main.yaml diff --git a/fdio.infra.ansible/roles/aws/handlers/main.yaml b/fdio.infra.ansible/roles/aws/handlers/main.yaml new file mode 100644 index 0000000000..7363dc2c34 --- /dev/null +++ b/fdio.infra.ansible/roles/aws/handlers/main.yaml @@ -0,0 +1,15 @@ +--- +# file: roles/aws/handlers/main.yaml + +- name: Reboot server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server + +- name: AWS - Reload systemd-modules + systemd: + name: "systemd-modules-load" + state: "restarted" + tags: + - reload-systemd-modules diff --git a/fdio.infra.ansible/roles/aws/tasks/main.yaml b/fdio.infra.ansible/roles/aws/tasks/main.yaml new file mode 100644 index 0000000000..2d209762c3 --- /dev/null +++ b/fdio.infra.ansible/roles/aws/tasks/main.yaml @@ -0,0 +1,93 @@ +--- +# file: roles/aws/tasks/main.yaml + +- name: Edit repositories + include_tasks: "{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml" + tags: + - aws-edit-repo + +- name: Get vfio-pci With WC Patcher + get_url: + url: "https://github.com/amzn/amzn-drivers/raw/master/userspace/dpdk/enav2-vfio-patch/get-vfio-with-wc.sh" + dest: "/opt/get-vfio-with-wc.sh" + mode: "744" + tags: + - aws-vfio-patch + +- name: Create vfio-pci Patch Directory + file: + path: "/opt/patches/" + state: "directory" + tags: + - aws-vfio-patch + +- name: Get vfio-pci WC Patch >=4.10 + get_url: + url: "https://github.com/amzn/amzn-drivers/raw/master/userspace/dpdk/enav2-vfio-patch/patches/linux-4.10-vfio-wc.patch" + dest: "/opt/patches/linux-4.10-vfio-wc.patch" + mode: "744" + tags: + - aws-vfio-patch + +- name: Get vfio-pci WC Patch >=5.8 + get_url: + url: "https://github.com/amzn/amzn-drivers/raw/master/userspace/dpdk/enav2-vfio-patch/patches/linux-5.8-vfio-wc.patch" + dest: "/opt/patches/linux-5.8-vfio-wc.patch" + mode: "744" + tags: + - aws-vfio-patch + +- name: Compile vfio-pci With WC Patch + shell: "/bin/bash /opt/get-vfio-with-wc.sh" + tags: + - aws-vfio-patch + +- name: Load Kernel Modules By Default + lineinfile: + path: "/etc/modules" + state: "present" + line: "{{ item }}" + with_items: + - "vfio-pci" + - "igb_uio" + tags: + - aws-load-kernel-modules + +- name: Add Kernel Modules Options (igb_uio) + lineinfile: + path: "/etc/modprobe.d/igb_uio.conf" + state: "present" + line: "{{ item }}" + create: "yes" + with_items: + - "options igb_uio wc_activate=1" + tags: + - aws-load-kernel-modules + +- name: Add Kernel Modules Options (vfio-pci) + lineinfile: + path: "/etc/modprobe.d/vfio-noiommu.conf" + state: "present" + line: "{{ item }}" + create: "yes" + with_items: + - "options vfio enable_unsafe_noiommu_mode=1" + tags: + - aws-load-kernel-modules + +- name: Reload systemd-modules + systemd: + name: "systemd-modules-load" + state: "restarted" + tags: + - aws-reload-systemd-modules + +- name: Performance Tuning - Adjust nr_hugepages + sysctl: + name: "vm.nr_hugepages" + value: "8192" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - aws-set-hugepages diff --git a/fdio.infra.ansible/roles/aws/tasks/ubuntu_bionic.yaml b/fdio.infra.ansible/roles/aws/tasks/ubuntu_bionic.yaml new file mode 100644 index 0000000000..bca1cf5095 --- /dev/null +++ 
b/fdio.infra.ansible/roles/aws/tasks/ubuntu_bionic.yaml @@ -0,0 +1,10 @@ +--- +# file: roles/aws/tasks/ubuntu_bionic.yaml + +- name: Enable deb-src APT Repository + apt_repository: + repo: "deb-src http://archive.ubuntu.com/ubuntu bionic main" + state: "present" + update_cache: yes + tags: + - aws-enable-src-repo diff --git a/fdio.infra.ansible/roles/aws/tasks/ubuntu_focal.yaml b/fdio.infra.ansible/roles/aws/tasks/ubuntu_focal.yaml new file mode 100644 index 0000000000..a8cc56c0c4 --- /dev/null +++ b/fdio.infra.ansible/roles/aws/tasks/ubuntu_focal.yaml @@ -0,0 +1,10 @@ +--- +# file: roles/aws/tasks/ubuntu_focal.yaml + +- name: Enable deb-src APT Repository + apt_repository: + repo: "deb-src http://archive.ubuntu.com/ubuntu focal main" + state: "present" + update_cache: yes + tags: + - aws-enable-src-repo diff --git a/fdio.infra.ansible/roles/azure/defaults/main.yaml b/fdio.infra.ansible/roles/azure/defaults/main.yaml new file mode 100644 index 0000000000..8c48c307bc --- /dev/null +++ b/fdio.infra.ansible/roles/azure/defaults/main.yaml @@ -0,0 +1,3 @@ +--- +# file: roles/azure/defaults/main.yaml + diff --git a/fdio.infra.ansible/roles/azure/files/10-dtap.link b/fdio.infra.ansible/roles/azure/files/10-dtap.link new file mode 100644 index 0000000000..a8e0aa10f3 --- /dev/null +++ b/fdio.infra.ansible/roles/azure/files/10-dtap.link @@ -0,0 +1,4 @@ +[Match] +OriginalName=dtap* +[Link] +NamePolicy=kernel diff --git a/fdio.infra.ansible/roles/azure/handlers/main.yaml b/fdio.infra.ansible/roles/azure/handlers/main.yaml new file mode 100644 index 0000000000..f0d46062d9 --- /dev/null +++ b/fdio.infra.ansible/roles/azure/handlers/main.yaml @@ -0,0 +1,15 @@ +--- +# file: roles/azure/handlers/main.yaml + +- name: Reboot server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server + +- name: Azure - Reload systemd-modules + systemd: + name: "systemd-modules-load" + state: "restarted" + tags: + - reload-systemd-modules diff --git a/fdio.infra.ansible/roles/azure/tasks/main.yaml b/fdio.infra.ansible/roles/azure/tasks/main.yaml new file mode 100644 index 0000000000..c8d72475d8 --- /dev/null +++ b/fdio.infra.ansible/roles/azure/tasks/main.yaml @@ -0,0 +1,38 @@ +--- +# file: roles/azure/tasks/main.yaml + +- name: Azure - Load Kernel Modules By Default + lineinfile: + path: "/etc/modules" + state: "present" + line: "{{ item }}" + with_items: + - "vfio-pci" + - "ib_uverbs" + - "mlx4_ib" + - "mlx5_ib" + notify: "Azure - Reload systemd-modules" + tags: + - load-kernel-modules + +- name: Azure - Performance Tuning - Adjust nr_hugepages + sysctl: + name: "vm.nr_hugepages" + value: "8192" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - set-sysctl + +- name: Azure - Prevent Interface Renaming + copy: + src: "files/10-dtap.link" + dest: "/etc/systemd/network/" + owner: "root" + group: "root" + mode: "0644" + notify: + - "Reboot server" + tags: + - prevent-interface-renaming diff --git a/fdio.infra.ansible/roles/baremetal/handlers/cimc.yaml b/fdio.infra.ansible/roles/baremetal/handlers/cimc.yaml new file mode 100644 index 0000000000..0048d19032 --- /dev/null +++ b/fdio.infra.ansible/roles/baremetal/handlers/cimc.yaml @@ -0,0 +1,74 @@ +--- +# file: roles/baremetal/handlers/cimc.yaml + +- name: Boot from network + imc_rest: + hostname: "{{ inventory_cimc_hostname }}" + username: "{{ inventory_cimc_username }}" + password: "{{ inventory_cimc_password }}" + validate_certs: no + content: | + + + + + delegate_to: localhost + tags: + - boot-network + +- name: Boot
from storage + imc_rest: + hostname: "{{ inventory_cimc_hostname }}" + username: "{{ inventory_cimc_username }}" + password: "{{ inventory_cimc_password }}" + validate_certs: no + content: | + + + + delegate_to: localhost + tags: + - boot-storage + +- name: Power up server + imc_rest: + hostname: "{{ inventory_cimc_hostname }}" + username: "{{ inventory_cimc_username }}" + password: "{{ inventory_cimc_password }}" + validate_certs: no + content: | + + + + delegate_to: localhost + tags: + - power-up + +- name: Power down server + imc_rest: + hostname: "{{ inventory_cimc_hostname }}" + username: "{{ inventory_cimc_username }}" + password: "{{ inventory_cimc_password }}" + validate_certs: no + content: | + + + + delegate_to: localhost + tags: + - power-down + +- name: Power cycle server + imc_rest: + hostname: "{{ inventory_cimc_hostname }}" + username: "{{ inventory_cimc_username }}" + password: "{{ inventory_cimc_password }}" + validate_certs: no + content: | + + + + + delegate_to: localhost + tags: + - power-cycle diff --git a/fdio.infra.ansible/roles/baremetal/handlers/ipmi.yaml b/fdio.infra.ansible/roles/baremetal/handlers/ipmi.yaml new file mode 100644 index 0000000000..239b8973f7 --- /dev/null +++ b/fdio.infra.ansible/roles/baremetal/handlers/ipmi.yaml @@ -0,0 +1,52 @@ +--- +# file: roles/baremetal/handlers/ipmi.yaml + +- name: Boot from network + ipmi_boot: + name: "{{ inventory_ipmi_hostname }}" + user: "{{ inventory_ipmi_username }}" + password: "{{ inventory_ipmi_password }}" + bootdev: network + delegate_to: localhost + tags: + - boot-network + +- name: Boot from storage + ipmi_boot: + name: "{{ inventory_ipmi_hostname }}" + user: "{{ inventory_ipmi_username }}" + password: "{{ inventory_ipmi_password }}" + bootdev: hd + delegate_to: localhost + tags: + - boot-storage + +- name: Power up server + ipmi_power: + name: "{{ inventory_ipmi_hostname }}" + user: "{{ inventory_ipmi_username }}" + password: "{{ inventory_ipmi_password }}" + state: on + delegate_to: localhost + tags: + - power-up + +- name: Power down server + ipmi_power: + name: "{{ inventory_ipmi_hostname }}" + user: "{{ inventory_ipmi_username }}" + password: "{{ inventory_ipmi_password }}" + state: off + delegate_to: localhost + tags: + - power-down + +- name: Power cycle server + ipmi_power: + name: "{{ inventory_ipmi_hostname }}" + user: "{{ inventory_ipmi_username }}" + password: "{{ inventory_ipmi_password }}" + state: boot + delegate_to: localhost + tags: + - power-cycle diff --git a/fdio.infra.ansible/roles/baremetal/handlers/main.yaml b/fdio.infra.ansible/roles/baremetal/handlers/main.yaml new file mode 100644 index 0000000000..d8dabeb222 --- /dev/null +++ b/fdio.infra.ansible/roles/baremetal/handlers/main.yaml @@ -0,0 +1,30 @@ +--- +# file: roles/baremetal/handlers/main.yaml + +- name: IPMI specific + import_tasks: ipmi.yaml + when: inventory_ipmi_hostname is defined + tags: + - ipmi-handlers + +- name: CIMC specific + import_tasks: cimc.yaml + when: inventory_cimc_hostname is defined + tags: + - cimc-handlers + +- name: Reboot server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server + +- name: Wait for server to restart + wait_for: + host: "{{ inventory_hostname }}" + search_regex: OpenSSH + port: 22 + delay: 60 + timeout: 3600 + tags: + - reboot-server diff --git a/fdio.infra.ansible/roles/cadvisor/defaults/main.yaml b/fdio.infra.ansible/roles/cadvisor/defaults/main.yaml new file mode 100644 index 0000000000..3b25e551ea --- /dev/null +++ b/fdio.infra.ansible/roles/cadvisor/defaults/main.yaml @@ -0,0 
+1,24 @@ +--- +# file: roles/cadvisor/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + - "python3-docker" + - "python3-dockerpty" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +image: "{{ image_by_arch[ansible_machine] }}" + +image_by_arch: + aarch64: "zcube/cadvisor:v0.37.0" + x86_64: "gcr.io/cadvisor/cadvisor:v0.38.7" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/cadvisor/tasks/main.yaml b/fdio.infra.ansible/roles/cadvisor/tasks/main.yaml new file mode 100644 index 0000000000..a2a13368c2 --- /dev/null +++ b/fdio.infra.ansible/roles/cadvisor/tasks/main.yaml @@ -0,0 +1,39 @@ +--- +# file: roles/cadvisor/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - cadvisor-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - cadvisor-inst-prerequisites + +- name: Inst - Start a container + docker_container: + name: "cAdvisor" + image: "{{ image }}" + state: "started" + restart_policy: "unless-stopped" + detach: yes + devices: + - "/dev/kmsg" + ports: + - "8080:8080" + privileged: yes + volumes: + - "/:/rootfs:ro" + - "/var/run:/var/run:ro" + - "/sys:/sys:ro" + - "/var/lib/docker/:/var/lib/docker:ro" + - "/dev/disk/:/dev/disk:ro" + tags: + - cadvisor-run-container diff --git a/fdio.infra.ansible/roles/calibration/defaults/main.yaml b/fdio.infra.ansible/roles/calibration/defaults/main.yaml new file mode 100644 index 0000000000..020c0119b1 --- /dev/null +++ b/fdio.infra.ansible/roles/calibration/defaults/main.yaml @@ -0,0 +1,47 @@ +--- +# file: roles/calibration/defaults/main.yaml + +# Packages to install. +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "dmidecode" + focal: + - "build-essential" + - "dmidecode" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +# Kernel version to check. 
+kernel_version: "{{ kernel_version_by_distro_by_arch[ansible_distribution | lower][ansible_distribution_release][ansible_machine] }}" + +kernel_version_by_distro_by_arch: + ubuntu: + bionic: + x86_64: + - "4.15.0-72-generic" + - "5.3.0-1020-azure" + - "4.15.0-1057-aws" + aarch64: + - "4.15.0-54-generic" + focal: + x86_64: + - "5.4.0-65-generic" + - "5.3.0-1020-azure" + - "5.4.0-1035-aws" + aarch64: + - "5.4.0-65-generic" + +pma_directory: "/tmp/pma_tools" +jitter_core: 7 +jitter_iterations: 20 diff --git a/fdio.infra.ansible/roles/calibration/tasks/aarch64.yaml b/fdio.infra.ansible/roles/calibration/tasks/aarch64.yaml new file mode 100644 index 0000000000..ca4e75d268 --- /dev/null +++ b/fdio.infra.ansible/roles/calibration/tasks/aarch64.yaml @@ -0,0 +1,2 @@ +--- +# file: roles/calibration/tasks/aarch64.yaml diff --git a/fdio.infra.ansible/roles/calibration/tasks/main.yaml b/fdio.infra.ansible/roles/calibration/tasks/main.yaml new file mode 100644 index 0000000000..696f1c9265 --- /dev/null +++ b/fdio.infra.ansible/roles/calibration/tasks/main.yaml @@ -0,0 +1,89 @@ +--- +# file: roles/calibration/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - calibration-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - calibration-inst-prerequisites + +- name: Check CPU Power States + shell: "lscpu" + register: current_lscpu + changed_when: false + tags: + - check-cpu-frequency + +- name: Check CPU Power States + assert: + that: + - "'CPU min MHz' not in current_lscpu.stdout or 'Intel(R) Xeon(R)' not in ansible_processor" + fail_msg: "CPU configuration mismatch!" + success_msg: "CPU configuration match." + tags: + - check-cpu-frequency + +- name: Check Kernel Parameters + assert: + that: + - item in ansible_cmdline and grub[item] == ansible_cmdline[item] + fail_msg: "Kernel parameters mismatch!" + success_msg: "Kernel parameters match." + loop: "{{ grub.keys()|sort }}" + when: + - grub is defined + tags: + - check-kernel-params + +- name: Check Kernel Version + assert: + that: + - ansible_kernel in kernel_version + fail_msg: "Kernel version mismatch!" + success_msg: "Kernel version match."
+ tags: + - check-kernel-version + +- name: Get Spectre Meltdown Checker + get_url: + url: "https://meltdown.ovh" + dest: "/opt/spectre-meltdown-checker.sh" + mode: "744" + tags: + - check-spectre-meltdown + +- name: Run Spectre Meltdown Checker + shell: "/opt/spectre-meltdown-checker.sh --no-color || true" + async: 60 + poll: 0 + ignore_errors: true + register: spectre_meltdown_async + tags: + - check-spectre-meltdown + +- name: "{{ ansible_machine }} Specific" + include_tasks: "{{ ansible_machine }}.yaml" + tags: + - check-machine-specific + - check-jitter-tool + +- name: Check Sync Status + async_status: + jid: "{{ spectre_meltdown_async.ansible_job_id }}" + register: "spectre_meltdown_poll_results" + until: spectre_meltdown_poll_results.finished + retries: 30 + +- debug: var=spectre_meltdown_poll_results.stdout_lines + tags: + - check-spectre-meltdown diff --git a/fdio.infra.ansible/roles/calibration/tasks/x86_64.yaml b/fdio.infra.ansible/roles/calibration/tasks/x86_64.yaml new file mode 100644 index 0000000000..90b1c954b5 --- /dev/null +++ b/fdio.infra.ansible/roles/calibration/tasks/x86_64.yaml @@ -0,0 +1,35 @@ +--- +# file: roles/calibration/tasks/x86_64.yaml + +- name: Calibration - Clone PMA Tool + git: + repo: "https://gerrit.fd.io/r/pma_tools" + dest: "{{ pma_directory }}" + tags: + - check-jitter-tool + +- name: Calibration - Compile PMA Tool + raw: "cd {{ pma_directory }}/jitter && make" + tags: + - check-jitter-tool + +- name: Calibration - Run Jitter Tool + shell: "{{ pma_directory }}/jitter/jitter -c {{ jitter_core }} -i {{ jitter_iterations }} -f" + become: yes + async: 60 + poll: 0 + ignore_errors: yes + register: jitter_async + tags: + - check-jitter-tool + +- name: Check sync status + async_status: + jid: "{{ jitter_async.ansible_job_id }}" + register: "jitter_poll_results" + until: jitter_poll_results.finished + retries: 30 + +- debug: var=jitter_poll_results.stdout_lines + tags: + - check-jitter-tool diff --git a/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh b/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh new file mode 100644 index 0000000000..ede2db1273 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/files/reset_vppdevice.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +set -euo pipefail + +function die () { + # Print the message to standard error and exit with error code specified + # by the second argument. + # + # Hardcoded values: + # - The default error message. + # Arguments: + # - ${1} - The whole error message, be sure to quote. Optional + # - ${2} - The code to exit with, default: 1. + + set +eu + warn "${1:-Unspecified run-time error occurred!}" + exit "${2:-1}" +} + + +function set_eligibility_off { + # Set Nomad eligibility to ineligible for scheduling. Fail otherwise. + + set -euo pipefail + + node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die + node_status="$(nomad node status | grep $(hostname))" || die + + if [[ "${node_status}" != *"ineligible"* ]]; then + nomad node eligibility -disable "${node_id}" || die + node_status="$(nomad node status | grep $(hostname))" || die + if [[ "${node_status}" != *"ineligible"* ]]; then + die "Set eligibility off failed!" + fi + fi +} + + +function set_eligibility_on { + # Set Nomad eligibility to eligible for scheduling. Fail otherwise.
+ + set -euo pipefail + + node_id="$(nomad node status | grep $(hostname) | cut -d ' ' -f 1)" || die + node_status="$(nomad node status | grep $(hostname))" || die + + if [[ "${node_status}" == *"ineligible"* ]]; then + nomad node eligibility -enable "${node_id}" || die + node_status="$(nomad node status | grep $(hostname))" || die + if [[ "${node_status}" == *"ineligible"* ]]; then + die "Set eligibility on failed!" + fi + fi +} + + +function restart_vfs_service { + # Stop and start VF service. This will reinitialize VFs and driver mappings. + + set -euo pipefail + + warn "Restarting VFs service (this may take a few minutes)..." + sudo service csit-initialize-vfs stop || die "Failed to stop VFs service!" + sudo service csit-initialize-vfs start || die "Failed to start VFs service!" +} + + +function wait_for_pending_containers { + # Wait in a loop for a defined amount of time for pending containers to + # quit gracefully. If the force parameter is specified, force kill them. + + # Arguments: + # - ${@} - Script parameters. + + set -euo pipefail + + retries=60 + wait_time=60 + containers=(docker ps --quiet --filter "name=csit*") + + for i in $(seq 1 ${retries}); do + mapfile -t pending_containers < <( "${containers[@]}" ) || die + warn "Waiting for pending containers [${pending_containers[@]}] ..." + if [ ${#pending_containers[@]} -eq 0 ]; then + break + fi + sleep "${wait_time}" || die + done + if [ ${#pending_containers[@]} -ne 0 ]; then + if [[ "${1-}" == "force" ]]; then + warn "Force killing [${pending_containers[@]}] ..." + docker rm --force "${pending_containers[@]}" || die + else + die "Some containers are still running!" + fi + fi +} + + +function warn () { + # Print the message to standard error. + # + # Arguments: + # - ${@} - The text of the message. + + echo "$@" >&2 +} + + +set_eligibility_off || die +wait_for_pending_containers "${@}" || die +restart_vfs_service || die +set_eligibility_on || die diff --git a/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml b/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml new file mode 100644 index 0000000000..e030acbff2 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/clean_images.yaml @@ -0,0 +1,36 @@ +--- +# file: roles/cleanup/tasks/clean_images.yaml + +- name: Clean Docker Images + block: + - name: Clean Images - Prefetch Docker Images + cron: + name: "Prefetch docker image {{ item }}" + minute: "10" + hour: "7" + job: "/usr/bin/docker pull {{ item }}" + loop: + "{{ images_to_prefetch_by_arch[ansible_machine] }}" + tags: + - prefetch-docker-images + + - name: Clean Images - Remove Dangling Docker Images + cron: + name: "Remove dangling docker images" + minute: "10" + hour: "5" + weekday: "7" + job: "/usr/bin/docker rmi $(/usr/bin/docker images --filter 'dangling=true' -q)" + tags: + - remove-docker-images-dangling + + # TODO: Disabled until all images are in the registry + #- name: Clean Images - Prune Docker Images + # cron: + # name: "Prune docker images" + # minute: "10" + # hour: "6" + # weekday: 7 + # job: "/usr/bin/docker image prune --all --force" + # tags: + # - prune-docker-images \ No newline at end of file diff --git a/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml b/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml new file mode 100644 index 0000000000..25fd48e420 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/kill_containers.yaml @@ -0,0 +1,42 @@ +--- +# file: roles/cleanup/tasks/kill_containers.yaml + +- name: Kill Docker Containers + block: + - name: Kill Container - Get Running
Docker Containers + shell: "docker ps -aq" + register: running_containers + changed_when: no + tags: + - kill-containers + + - name: Kill Container - Remove All Docker Containers + shell: "docker rm --force {{ item }}" + with_items: "{{ running_containers.stdout_lines }}" + tags: + - kill-containers + + rescue: + - name: Restart Docker Daemon + systemd: + name: "docker" + state: "restarted" + +- name: Kill LXC Containers + block: + - name: Kill Container - Get Running LXC Containers + shell: "lxc-ls" + register: running_containers + changed_when: no + tags: + - kill-containers + + - name: Kill Container - Remove All LXC Containers + shell: "lxc-destroy --force -n {{ item }}" + with_items: "{{ running_containers.stdout_lines }}" + tags: + - kill-containers + + rescue: + - fail: + msg: "Kill LXC containers failed!" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml b/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml new file mode 100644 index 0000000000..c7cee37485 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/kill_process.yaml @@ -0,0 +1,37 @@ +--- +# file: roles/cleanup/tasks/kill_process.yaml + +- name: Kill Process - {{ process }} + block: + - name: Get PID Of {{ process }} + shell: "ps -ef | grep -v grep | grep -w {{ process }} | awk '{print $2}'" + when: + - process is defined and process != "" + register: running_processes + tags: + - kill-process + + - name: Safe Kill {{ process }} + shell: "kill {{ item }}" + with_items: "{{ running_processes.stdout_lines }}" + tags: + - kill-process + + - wait_for: + path: "/proc/{{ item }}/status" + state: "absent" + with_items: "{{ running_processes.stdout_lines }}" + ignore_errors: yes + register: killed_processes + tags: + - kill-process + + - name: Kill Process - Force Kill {{ process }} + shell: "kill -9 {{ item }}" + with_items: "{{ killed_processes.results | select('failed') | map(attribute='item') | list }}" + tags: + - kill-process + + rescue: + - fail: + msg: "Kill process {{ process }} failed!" diff --git a/fdio.infra.ansible/roles/cleanup/tasks/main.yaml b/fdio.infra.ansible/roles/cleanup/tasks/main.yaml new file mode 100644 index 0000000000..eeda0139b3 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/main.yaml @@ -0,0 +1,43 @@ +--- +# file: roles/cleanup/tasks/main.yaml +# purpose: Structured per server cleanup tasks. +# - main: +# - tg: +# - Run tasks on TG servers only. +# - Cleanup processes (T-Rex). +# - sut: +# - Run tasks on SUT servers only. +# - Cleanup file leftovers (logs). +# - Cleanup packages (VPP, Honeycomb). +# - Cleanup processes (qemu, l3fwd, testpmd, docker, kubernetes) +# - Cleanup interfaces. +# - vpp_device +# - Run tasks on vpp_device servers only. 
+# - Reset SRIOV +# - Docker image cleanup +# - nomad +# - Docker image cleanup + +- name: tg specific + include_tasks: tg.yaml + when: "'tg' in group_names" + tags: + - cleanup + +- name: sut specific + include_tasks: sut.yaml + when: "'sut' in group_names" + tags: + - cleanup + +- name: vpp_device specific + include_tasks: vpp_device.yaml + when: "'vpp_device' in group_names" + tags: + - cleanup + +- name: nomad specific + include_tasks: nomad.yaml + when: "'nomad' in group_names" + tags: + - cleanup diff --git a/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml b/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml new file mode 100644 index 0000000000..3c5bf6462d --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/nomad.yaml @@ -0,0 +1,22 @@ +--- +# file: roles/cleanup/tasks/nomad.yaml + +- name: Host Cleanup + block: + - name: Clean Images + import_tasks: clean_images.yaml + vars: + images_to_prefetch_by_arch: + aarch64: + - "fdiotools/builder-ubuntu2004:prod-aarch64" + - "fdiotools/builder-ubuntu1804:prod-aarch64" + - "fdiotools/builder-centos8:prod-aarch64" + x86_64: + - "fdiotools/builder-ubuntu2004:prod-x86_64" + - "fdiotools/builder-ubuntu1804:prod-x86_64" + - "fdiotools/builder-debian10:prod-x86_64" + - "fdiotools/builder-debian9:prod-x86_64" + - "fdiotools/builder-centos8:prod-x86_64" + - "fdiotools/builder-centos7:prod-x86_64" + tags: + - clean-images \ No newline at end of file diff --git a/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml b/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml new file mode 100644 index 0000000000..302b43c99a --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/remove_package.yaml @@ -0,0 +1,21 @@ +--- +# file: roles/cleanup/tasks/remove_package.yaml + +- name: Remove Package - Fix Corrupted APT + shell: "dpkg --configure -a" + when: + - ansible_distribution == 'Ubuntu' + tags: + - remove-package + +- name: Remove Package - {{ package }} + apt: + name: "{{ package }}" + force: yes + purge: yes + state: "absent" + failed_when: no + when: + - ansible_distribution == 'Ubuntu' + tags: + - remove-package diff --git a/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml b/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml new file mode 100644 index 0000000000..d80a35b1cb --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/sut.yaml @@ -0,0 +1,83 @@ +--- +# file: roles/cleanup/tasks/sut.yaml + +- name: Host Cleanup + block: + - name: Kill Processes - Qemu + import_tasks: kill_process.yaml + vars: + process: "qemu" + tags: + - kill-process + + - name: Kill Processes - L3fwd + import_tasks: kill_process.yaml + vars: + process: "l3fwd" + tags: + - kill-process + + - name: Kill Processes - Testpmd + import_tasks: kill_process.yaml + vars: + process: "testpmd" + tags: + - kill-process + + - name: Kill Processes - iPerf3 + import_tasks: kill_process.yaml + vars: + process: "iperf3" + tags: + - kill-process + + - name: Kill Processes - vpp_echo + import_tasks: kill_process.yaml + vars: + process: "vpp_echo" + tags: + - kill-process + + - name: Find File Or Dir - Core Zip File + find: + paths: "/tmp/" + patterns: "*tar.lzo.lrz.xz*" + register: files_to_delete + tags: + - remove-file-dir + + - name: Remove File Or Dir - Core Zip File + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + tags: + - remove-file-dir + + - name: Find File Or Dir - Core Dump File + find: + paths: "/tmp/" + patterns: "*core*" + register: files_to_delete + tags: + - remove-file-dir + + - name: Remove File Or Dir - Core 
Dump File + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ files_to_delete.files }}" + tags: + - remove-file-dir + + - name: Kill Containers - Remove All Containers + import_tasks: kill_containers.yaml + tags: + - kill-containers + + - name: Remove Packages - Remove VPP + import_tasks: remove_package.yaml + vars: + package: "*vpp*" + tags: + - remove-package diff --git a/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml b/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml new file mode 100644 index 0000000000..fa2d2d2819 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/tg.yaml @@ -0,0 +1,13 @@ +--- +# file: roles/cleanup/tasks/tg.yaml + +- name: Host Cleanup + block: + - name: Kill Processes - TRex + import_tasks: kill_process.yaml + vars: + process: "_t-rex" + when: + - docker_tg is undefined + tags: + - kill-process diff --git a/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml b/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml new file mode 100644 index 0000000000..41c4b29d37 --- /dev/null +++ b/fdio.infra.ansible/roles/cleanup/tasks/vpp_device.yaml @@ -0,0 +1,32 @@ +--- +# file: roles/cleanup/tasks/vpp_device.yaml + +- name: Host Cleanup + block: + - name: Reset vpp_device Binary + copy: + src: "files/reset_vppdevice.sh" + dest: "/usr/local/bin" + owner: "root" + group: "root" + mode: "744" + tags: + - reset-sriov + + - name: Clean Images + import_tasks: clean_images.yaml + vars: + images_to_prefetch_by_arch: + aarch64: + - "fdiotools/builder-ubuntu2004:prod-aarch64" + - "fdiotools/builder-ubuntu1804:prod-aarch64" + - "fdiotools/builder-centos8:prod-aarch64" + x86_64: + - "fdiotools/builder-ubuntu2004:prod-x86_64" + - "fdiotools/builder-ubuntu1804:prod-x86_64" + - "fdiotools/builder-debian10:prod-x86_64" + - "fdiotools/builder-debian9:prod-x86_64" + - "fdiotools/builder-centos8:prod-x86_64" + - "fdiotools/builder-centos7:prod-x86_64" + tags: + - clean-images \ No newline at end of file diff --git a/fdio.infra.ansible/roles/common/defaults/main.yaml b/fdio.infra.ansible/roles/common/defaults/main.yaml new file mode 100644 index 0000000000..43e40ebdf6 --- /dev/null +++ b/fdio.infra.ansible/roles/common/defaults/main.yaml @@ -0,0 +1,72 @@ +--- +# file: roles/common/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "autoconf" + - "cgroup-tools" + - "dkms" + - "iperf3" + - "linux-tools-common" + - "ninja-build" + - "qemu-system" + - "socat" + - "unzip" + - "virtualenv" + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "libpcap-dev" + - "net-tools" + - "python-all" + - "python-apt" + - "python-cffi" + - "python-cffi-backend" + - "python-dev" + - "python-pip" + - "python-setuptools" + - "python3-all" + - "python3-apt" + - "python3-cffi" + - "python3-cffi-backend" + - "python3-dev" + - "python3-pip" + - "python3-pyelftools" + - "python3-setuptools" + focal: + - "build-essential" + - "libpcap-dev" + - "net-tools" + - "python3-all" + - "python3-apt" + - "python3-cffi" + - "python3-cffi-backend" + - "python3-dev" + - "python3-pip" + - "python3-pyelftools" + - "python3-setuptools" + +packages_by_arch: + aarch64: + - "gfortran" + - "libblas-dev" + - "libffi-dev" + - "liblapack-dev" + - "libssl-dev" + x86_64: + - [] + +# Proxy settings: Uncomment and fill the proper values. These variables will be +# set globally by writing into /etc/environment file on target machine. 
+#proxy_env: +# http_proxy: http://proxy.com:80 +# HTTP_PROXY: http://proxy.com:80 +# https_proxy: http://proxy.com:80 +# HTTPS_PROXY: http://proxy.com:80 +# ftp_proxy: http://proxy.com:80 +# FTP_PROXY: http://proxy.com:80 +# no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }} +# NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/common/handlers/main.yaml b/fdio.infra.ansible/roles/common/handlers/main.yaml new file mode 100644 index 0000000000..bb317e8067 --- /dev/null +++ b/fdio.infra.ansible/roles/common/handlers/main.yaml @@ -0,0 +1,8 @@ +--- +# file: roles/common/handlers/main.yaml + +- name: Reboot Server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server diff --git a/fdio.infra.ansible/roles/common/tasks/main.yaml b/fdio.infra.ansible/roles/common/tasks/main.yaml new file mode 100644 index 0000000000..60b49842d2 --- /dev/null +++ b/fdio.infra.ansible/roles/common/tasks/main.yaml @@ -0,0 +1,55 @@ +--- +# file: roles/common/tasks/main.yaml + +- name: Conf - Add permanent proxy settings + lineinfile: + path: "/etc/environment" + state: "present" + line: "{{ item.key }}={{ item.value }}" + with_dict: "{{ proxy_env }}" + when: proxy_env is defined + tags: + - common-conf-proxy + +- name: Inst - Update package cache (apt) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - common-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - common-inst-prerequisites + +- name: Inst - Meson (DPDK) + pip: + name: + - "meson==0.47.1" + tags: + - common-inst-meson + +- name: Conf - sudoers admin + lineinfile: + path: "/etc/sudoers" + state: "present" + regexp: "^%admin ALL=" + line: "%admin ALL=(ALL) ALL" + validate: "/usr/sbin/visudo -cf %s" + tags: + - common-conf-sudoers + +- name: Conf - sudoers nopasswd + lineinfile: + path: "/etc/sudoers" + state: "present" + regexp: "^%sudo" + line: "%sudo ALL=(ALL:ALL) NOPASSWD: ALL" + validate: "/usr/sbin/visudo -cf %s" + tags: + - common-conf-sudoers diff --git a/fdio.infra.ansible/roles/consul/defaults/main.yaml b/fdio.infra.ansible/roles/consul/defaults/main.yaml new file mode 100644 index 0000000000..786554eb58 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/defaults/main.yaml @@ -0,0 +1,110 @@ +--- +# file: roles/consul/defaults/main.yaml + +# Inst - Prerequisites. +packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "cgroup-bin" + - "curl" + - "git" + - "libcgroup1" + - "unzip" + - "htop" +packages_by_distro: + ubuntu: + - [] +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +# Inst - Download Consul. +consul_architecture_map: + amd64: "amd64" + x86_64: "amd64" + armv7l: "arm" + aarch64: "arm64" + 32-bit: "386" + 64-bit: "amd64" +consul_architecture: "{{ consul_architecture_map[ansible_architecture] }}" +consul_version: "1.8.6" +consul_pkg: "consul_{{ consul_version }}_linux_{{ consul_architecture }}.zip" +consul_zip_url: "https://releases.hashicorp.com/consul/{{ consul_version }}/{{ consul_pkg }}" + +# Inst - System paths. 
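+# (For reference, on an x86_64 host the download variables above resolve to
+# consul_pkg "consul_1.8.6_linux_amd64.zip" fetched from
+# https://releases.hashicorp.com/consul/1.8.6/consul_1.8.6_linux_amd64.zip.)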
+consul_bin_dir: "/usr/local/bin" +consul_config_dir: "/etc/consul.d" +consul_data_dir: "/var/consul" +consul_inst_dir: "/opt" +consul_lockfile: "/var/lock/subsys/consul" +consul_run_dir: "/var/run/consul" +consul_ssl_dir: "/etc/consul.d/ssl" +nomad_config_dir: "/etc/nomad.d" + +# Conf - Service. +consul_node_role: "both" +consul_restart_handler_state: "restarted" +nomad_restart_handler_state: "restarted" +systemd_resolved_state: "stopped" + +# Conf - User and group. +consul_group: "consul" +consul_group_state: "present" +consul_manage_group: true +consul_manage_user: true +consul_user: "consul" +consul_user_groups: [ docker, nomad, consul, root ] +consul_user_state: "present" + +# Conf - nomad.d/consul.hcl +consul_nomad_integration: true +consul_certificates: + - src: "{{ vault_consul_v1_ca_file }}" + dest: "{{ consul_ca_file }}" + - src: "{{ vault_consul_v1_cert_file }}" + dest: "{{ consul_cert_file }}" + - src: "{{ vault_consul_v1_key_file }}" + dest: "{{ consul_key_file }}" + +consul_auto_advertise: true +consul_checks_use_advertise: true +consul_server_service_name: "nomad" +consul_client_service_name: "nomad-client" +consul_server_auto_join: false +consul_client_auto_join: true +consul_ACL_token_set: false +consul_token: "consul_token_default" + +# Conf - base.hcl +consul_bind_addr: "{{ ansible_default_ipv4.address }}" +consul_client_addr: "0.0.0.0" +consul_datacenter: "dc1" +consul_disable_update_check: true +consul_enable_debug: false +consul_enable_syslog: true +consul_log_level: "INFO" +consul_node_name: "{{ inventory_hostname }}" +consul_retry_join: true +consul_bootstrap_expect: 2 +consul_encrypt: "" +consul_ca_file: "{{ consul_ssl_dir }}/ca.pem" +consul_cert_file: "{{ consul_ssl_dir }}/consul.pem" +consul_key_file: "{{ consul_ssl_dir }}/consul-key.pem" +consul_ui: true +consul_recursors: + - 1.1.1.1 + - 8.8.8.8 + +# Conf - ports.hcl +consul_port_dns: 53 +consul_port_http: 8500 +consul_port_https: 8501 +consul_port_grpc: 8502 +consul_port_serf_lan: 8301 +consul_port_serf_wan: 8302 +consul_port_server: 8300 + +# Conf - services.json +consul_services: false \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/handlers/main.yaml b/fdio.infra.ansible/roles/consul/handlers/main.yaml new file mode 100644 index 0000000000..338baea74e --- /dev/null +++ b/fdio.infra.ansible/roles/consul/handlers/main.yaml @@ -0,0 +1,23 @@ +--- +# file roles/consul/handlers/main.yaml + +- name: Restart Nomad + systemd: + daemon_reload: true + enabled: true + name: "nomad" + state: "{{ nomad_restart_handler_state }}" + +- name: Restart Consul + systemd: + daemon_reload: true + enabled: true + name: "consul" + state: "{{ consul_restart_handler_state }}" + +- name: Stop Systemd-resolved + systemd: + daemon_reload: true + enabled: false + name: "systemd-resolved" + state: "{{ systemd_resolved_state }}" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/meta/main.yaml b/fdio.infra.ansible/roles/consul/meta/main.yaml new file mode 100644 index 0000000000..4ada8efad6 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/meta/main.yaml @@ -0,0 +1,9 @@ +--- +# file: roles/consul/meta/main.yaml + +# desc: Install consul from stable branch and configure service. +# inst: Consul +# conf: ? 
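+#       (templates rendered by the role: base.hcl, ports.hcl, telemetry.hcl,
+#       services.json when consul_services is set, and nomad.d/consul.hcl
+#       when consul_nomad_integration is enabled)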
+# info: 1.0 - added role + +dependencies: [ ] diff --git a/fdio.infra.ansible/roles/consul/tasks/main.yaml b/fdio.infra.ansible/roles/consul/tasks/main.yaml new file mode 100644 index 0000000000..99ac52da44 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/tasks/main.yaml @@ -0,0 +1,182 @@ +--- +# file: roles/consul/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - consul-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - consul-inst-prerequisites + +- name: Conf - Add Consul Group + group: + name: "{{ consul_group }}" + state: "{{ consul_group_state }}" + when: + - consul_manage_group | bool + tags: + - consul-conf-user + +- name: Conf - Add Consul user + user: + name: "{{ consul_user }}" + group: "{{ consul_group }}" + groups: "{{ consul_user_groups }}" + state: "{{ consul_user_state }}" + system: true + when: + - consul_manage_user | bool + tags: + - consul-conf-user + +- name: Inst - Clean Consul + file: + path: "{{ consul_inst_dir }}/consul" + state: "absent" + tags: + - consul-inst-package + +- name: Inst - Download Consul + get_url: + url: "{{ consul_zip_url }}" + dest: "{{ consul_inst_dir }}/{{ consul_pkg }}" + tags: + - consul-inst-package + +- name: Inst - Unarchive Consul + unarchive: + src: "{{ consul_inst_dir }}/{{ consul_pkg }}" + dest: "{{ consul_inst_dir }}/" + creates: "{{ consul_inst_dir }}/consul" + remote_src: true + tags: + - consul-inst-package + +- name: Inst - Consul + copy: + src: "{{ consul_inst_dir }}/consul" + dest: "{{ consul_bin_dir }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + force: true + mode: 0755 + remote_src: true + tags: + - consul-inst-package + +- name: Conf - Create Directories "{{ consul_data_dir }}" + file: + dest: "{{ consul_data_dir }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + tags: + - consul-conf + +- name: Conf - Create Directories "{{ consul_ssl_dir }}" + file: + dest: "{{ consul_ssl_dir }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + tags: + - consul-conf + +- name: Conf - Create Config Directory + file: + dest: "{{ consul_config_dir }}" + state: directory + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0755 + tags: + - consul-conf + +- name: Conf - Nomad integration Consul Configuration + template: + src: consul.hcl.j2 + dest: "{{ nomad_config_dir }}/consul.hcl" + owner: "nomad" + group: "nomad" + mode: 0644 + when: + - consul_nomad_integration | bool + tags: + - consul-conf + +- name: Conf - Base Configuration + template: + src: base.hcl.j2 + dest: "{{ consul_config_dir }}/base.hcl" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0644 + tags: + - consul-conf + +- name: Conf - Ports Configuration + template: + src: ports.hcl.j2 + dest: "{{ consul_config_dir }}/ports.hcl" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0644 + tags: + - consul-conf + +- name: Conf - Telemetry Configuration + template: + src: telemetry.hcl.j2 + dest: "{{ consul_config_dir }}/telemetry.hcl" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0644 + tags: + - consul-conf + +- name: Conf - Services Configuration + template: + src: services.json.j2 + dest: "{{ consul_config_dir }}/services.json" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0644 + when: + - 
consul_services + tags: + - consul-conf + +- name: Conf - Copy Certificates And Keys + copy: + content: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ consul_user }}" + group: "{{ consul_group }}" + mode: 0600 + no_log: true + loop: "{{ consul_certificates | flatten(levels=1) }}" + tags: + - consul-conf + +- name: Conf - Systemd Script + template: + src: "consul_systemd.service.j2" + dest: "/lib/systemd/system/consul.service" + owner: "root" + group: "root" + mode: 0644 +# notify: +# - "Restart Consul" +# - "Stop Systemd-resolved" +# - "Restart Nomad" + tags: + - consul-conf diff --git a/fdio.infra.ansible/roles/consul/templates/base.hcl.j2 b/fdio.infra.ansible/roles/consul/templates/base.hcl.j2 new file mode 100644 index 0000000000..536c48d847 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/base.hcl.j2 @@ -0,0 +1,43 @@ +node_name = "{{ consul_node_name }}" +datacenter = "{{ consul_datacenter }}" + +bind_addr = "{{ consul_bind_addr }}" +client_addr = "{{ consul_client_addr }}" +data_dir = "{{ consul_data_dir }}" + +enable_syslog = {{ consul_enable_syslog | bool | lower }} +enable_debug = {{ consul_enable_debug | bool | lower }} +disable_update_check = {{ consul_disable_update_check | bool | lower }} +log_level = "{{ consul_log_level }}" + +server = {{ consul_node_server | bool | lower }} +encrypt = "{{ consul_encrypt }}" +{% if consul_node_server | bool == True %} +bootstrap_expect = {{ consul_bootstrap_expect }} +verify_incoming = true +verify_outgoing = true +verify_server_hostname = true +ca_file = "{{ consul_ca_file }}" +cert_file = "{{ consul_cert_file }}" +key_file = "{{ consul_key_file }}" +auto_encrypt { + allow_tls = true +} +{% else %} +verify_incoming = false +verify_outgoing = false +verify_server_hostname = false +ca_file = "{{ consul_ca_file }}" +auto_encrypt { + tls = false +} +{% endif %} +{% if consul_retry_join | bool -%} +retry_join = [ {% for ip_port in consul_retry_servers -%} "{{ ip_port }}"{% if not loop.last %}, {% endif %}{%- endfor -%} ] +{%- endif %} + +ui = {{ consul_ui | bool | lower }} + +{% if consul_recursors -%} +recursors = [ {% for server in consul_recursors -%} "{{ server }}"{% if not loop.last %}, {% endif %}{%- endfor -%} ] +{%- endif %} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/templates/consul.hcl.j2 b/fdio.infra.ansible/roles/consul/templates/consul.hcl.j2 new file mode 100644 index 0000000000..c78e5e1ce5 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/consul.hcl.j2 @@ -0,0 +1,12 @@ +consul { + auto_advertise = {{ consul_auto_advertise | bool | lower }} + checks_use_advertise = {{ consul_checks_use_advertise | bool | lower }} + client_auto_join = {{ consul_client_auto_join | bool | lower }} + client_service_name = "{{ consul_client_service_name }}" + server_service_name = "{{ consul_server_service_name }}" + server_auto_join = {{ consul_server_auto_join | bool | lower }} +{% if consul_ACL_token_set == True %} + token = "{{ consul_token }}" +{% endif %} + +} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/templates/consul_systemd.service.j2 b/fdio.infra.ansible/roles/consul/templates/consul_systemd.service.j2 new file mode 100644 index 0000000000..8e1ef1310d --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/consul_systemd.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Consul Service +Documentation=https://www.consul.io/docs/ +Wants=network-online.target +After=network-online.target + +[Service] +# TODO: Decrease privilege 
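+# With the role defaults above, this ExecStart renders roughly as:
+#   /usr/local/bin/consul agent -config-dir /etc/consul.d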
+ExecReload=/bin/kill -SIGHUP $MAINPID +ExecStart={{ consul_bin_dir }}/consul agent -config-dir {{ consul_config_dir }} +KillSignal=SIGTERM +LimitNOFILE=infinity +LimitNPROC=infinity +Restart=on-failure +RestartSec=1 +User=root +Group=root +Environment="GOMAXPROCS=2" + +[Install] +WantedBy=multi-user.target diff --git a/fdio.infra.ansible/roles/consul/templates/ports.hcl.j2 b/fdio.infra.ansible/roles/consul/templates/ports.hcl.j2 new file mode 100644 index 0000000000..a658060ce8 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/ports.hcl.j2 @@ -0,0 +1,9 @@ +ports { + dns = {{ consul_port_dns }} + http = {{ consul_port_http }} + https = {{ consul_port_https }} + grpc = {{ consul_port_grpc }} + serf_lan = {{ consul_port_serf_lan }} + serf_wan = {{ consul_port_serf_wan }} + server = {{ consul_port_server }} +} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/templates/services.json.j2 b/fdio.infra.ansible/roles/consul/templates/services.json.j2 new file mode 100644 index 0000000000..3245ba92a4 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/services.json.j2 @@ -0,0 +1,13 @@ +{ + "services": [ +{% for item in consul_services %} + { + "name": "{{ item.name }}", + "port": {{ item.port }} + } +{%- if not loop.last %}, +{% endif %} +{% endfor %} + + ] +} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/templates/telemetry.hcl.j2 b/fdio.infra.ansible/roles/consul/templates/telemetry.hcl.j2 new file mode 100644 index 0000000000..ec7fabc9da --- /dev/null +++ b/fdio.infra.ansible/roles/consul/templates/telemetry.hcl.j2 @@ -0,0 +1,3 @@ +telemetry { + prometheus_retention_time = "24h" +} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/consul/vars/main.yaml b/fdio.infra.ansible/roles/consul/vars/main.yaml new file mode 100644 index 0000000000..b46333a7a7 --- /dev/null +++ b/fdio.infra.ansible/roles/consul/vars/main.yaml @@ -0,0 +1,5 @@ +--- +# file: roles/consul/vars/main.yaml + +consul_node_client: "{{ (consul_node_role == 'client') or (consul_node_role == 'both') }}" +consul_node_server: "{{ (consul_node_role == 'server') or (consul_node_role == 'both') }}" diff --git a/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile b/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile new file mode 100644 index 0000000000..73ff5c5e86 --- /dev/null +++ b/fdio.infra.ansible/roles/csit_sut_image/files/Dockerfile @@ -0,0 +1,166 @@ +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
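+# This image is built locally by the csit_sut_image role further below,
+# effectively: docker build -t csit_sut-ubuntu2004:local /opt/csit-sut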
+ +FROM ubuntu:20.04 +LABEL Description="CSIT vpp-device ubuntu 20.04 SUT image" +LABEL Version="master" + +# Setup the environment +ENV DEBIAN_FRONTEND=noninteractive + +# Configure locales +RUN apt-get update -qq \ + && apt-get install -y \ + apt-utils \ + locales \ + && sed -i 's/# \(en_US\.UTF-8 .*\)/\1/' /etc/locale.gen \ + && locale-gen en_US.UTF-8 \ + && dpkg-reconfigure --frontend=noninteractive locales \ + && update-locale LANG=en_US.UTF-8 \ + && TZ=Etc/UTC && ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone \ + && rm -r /var/lib/apt/lists/* +ENV LANG="en_US.UTF-8" LANGUAGE="en_US" LC_ALL="en_US.UTF-8" + +# Install packages and Docker +RUN apt-get -q update \ + && apt-get install -y -qq \ + apt-transport-https \ + bridge-utils \ + ca-certificates \ + cloud-init \ + cmake \ + curl \ + dkms \ + gdb \ + gfortran \ + libapr1 \ + libblas-dev \ + libffi-dev \ + liblapack-dev \ + libmbedcrypto3 \ + libmbedtls12 \ + libmbedx509-0 \ + libnuma1 \ + libnuma-dev \ + libpcap-dev \ + libpixman-1-dev \ + libssl-dev \ + locales \ + net-tools \ + openssh-server \ + pciutils \ + python3-all \ + python3-apt \ + python3-cffi \ + python3-cffi-backend \ + python3-dev \ + python3-pip \ + python3-setuptools \ + python3-virtualenv \ + qemu-system \ + rsyslog \ + socat \ + software-properties-common \ + strongswan \ + ssh \ + sshpass \ + sudo \ + supervisor \ + tar \ + tcpdump \ + unzip \ + vim \ + wget \ + zlib1g-dev \ + && curl -fsSL https://get.docker.com | sh \ + && rm -rf /var/lib/apt/lists/* + +# Fix permissions +RUN chown root:syslog /var/log \ + && chmod 755 /etc/default + +# Create directory structure +RUN mkdir -p /tmp/dumps \ + && mkdir -p /var/cache/vpp/python \ + && mkdir -p /var/run/sshd + +# CSIT PIP pre-cache +RUN pip3 install \ + ecdsa==0.13.3 \ + paramiko==2.6.0 \ + pycrypto==2.6.1 \ + pypcap==1.2.3 \ + PyYAML==5.1.1 \ + requests==2.22.0 \ + robotframework==3.1.2 \ + scapy==2.4.3 \ + scp==0.13.2 \ + ansible==2.10.7 \ + dill==0.2.8.2 \ + numpy==1.17.3 \ + hdrhistogram==0.6.1 \ + plotly==4.1.1 \ + PTable==0.9.2 \ + Sphinx==2.2.1 \ + sphinx-rtd-theme==0.4.0 \ + sphinxcontrib-programoutput==0.15 \ + sphinxcontrib-robotdoc==0.11.0 \ + ply==3.11 \ + alabaster==0.7.12 \ + Babel==2.7.0 \ + bcrypt==3.1.7 \ + certifi==2019.9.11 \ + cffi==1.13.2 \ + chardet==3.0.4 \ + cryptography==2.8 \ + docutils==0.15.2 \ + future==0.18.2 \ + idna==2.8 \ + imagesize==1.1.0 \ + Jinja2==2.10.3 \ + MarkupSafe==1.1.1 \ + packaging==19.2 \ + pbr==5.4.3 \ + pycparser==2.19 \ + Pygments==2.4.2 \ + PyNaCl==1.3.0 \ + pyparsing==2.4.4 \ + python-dateutil==2.8.1 \ + pytz==2019.3 \ + retrying==1.3.3 \ + six==1.13.0 \ + snowballstemmer==2.0.0 \ + sphinxcontrib-applehelp==1.0.1 \ + sphinxcontrib-devhelp==1.0.1 \ + sphinxcontrib-htmlhelp==1.0.2 \ + sphinxcontrib-jsmath==1.0.1 \ + sphinxcontrib-qthelp==1.0.2 \ + sphinxcontrib-serializinghtml==1.1.3 \ + urllib3==1.25.6 + +# ARM workaround +RUN pip3 install \ + pandas==0.25.3 \ + scipy==1.5.4 + +# SSH settings +RUN echo 'root:Csit1234' | chpasswd \ + && sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config \ + && sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd \ + && echo "export VISIBLE=now" >> /etc/profile + +EXPOSE 2222 + +COPY supervisord.conf /etc/supervisor/supervisord.conf + +CMD ["sh", "-c", "rm -f /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api; /usr/bin/supervisord -c /etc/supervisor/supervisord.conf; /usr/sbin/sshd -D -p 2222"] \ No newline at end of file diff 
--git a/fdio.infra.ansible/roles/csit_sut_image/files/supervisord.conf b/fdio.infra.ansible/roles/csit_sut_image/files/supervisord.conf new file mode 100644 index 0000000000..22a36be5c6 --- /dev/null +++ b/fdio.infra.ansible/roles/csit_sut_image/files/supervisord.conf @@ -0,0 +1,24 @@ +[unix_http_server] +file = /tmp/supervisor.sock +chmod = 0777 + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl = unix:///tmp/supervisor.sock + +[supervisord] +pidfile = /tmp/supervisord.pid +identifier = supervisor +directory = /tmp +logfile = /tmp/supervisord.log +loglevel = debug +nodaemon = false + +[program:vpp] +command = /usr/bin/vpp -c /etc/vpp/startup.conf +autostart = false +autorestart = true +redirect_stderr = true +priority = 1 \ No newline at end of file diff --git a/fdio.infra.ansible/roles/csit_sut_image/tasks/main.yaml b/fdio.infra.ansible/roles/csit_sut_image/tasks/main.yaml new file mode 100644 index 0000000000..2affe4b18e --- /dev/null +++ b/fdio.infra.ansible/roles/csit_sut_image/tasks/main.yaml @@ -0,0 +1,30 @@ +--- +# file: roles/csit_sut_image/tasks/main.yaml + +- name: Create a directory if it does not exist + file: + path: "/opt/csit-sut/" + state: "directory" + mode: 0755 + tags: + - csit-sut-image + +- name: Copy Build Items + copy: + src: "{{ item }}" + dest: "/opt/csit-sut/" + owner: "root" + group: "root" + mode: 0755 + with_items: + - Dockerfile + - supervisord.conf + tags: + - csit-sut-image + +- name: Build CSIT SUT Docker Image + shell: "docker build -t csit_sut-ubuntu2004:local ." + args: + chdir: "/opt/csit-sut" + tags: + - csit-sut-image \ No newline at end of file diff --git a/fdio.infra.ansible/roles/docker/defaults/main.yaml b/fdio.infra.ansible/roles/docker/defaults/main.yaml new file mode 100644 index 0000000000..8343558238 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/defaults/main.yaml @@ -0,0 +1,38 @@ +--- +# file: roles/docker/defaults/main.yaml + +# Version options. +docker_edition: "ce" +docker_package: "docker-{{ docker_edition }}" +docker_package_state: latest + +# Service options. +docker_service_state: started +docker_service_enabled: true +docker_restart_handler_state: restarted + +# Used only for Debian/Ubuntu. +docker_apt_release_channel: "stable" +docker_apt_repository: "deb https://download.docker.com/linux/{{ ansible_distribution|lower }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}" +docker_apt_repository_state: present +docker_apt_ignore_key_error: true +docker_apt_gpg_key: "https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg" +docker_apt_gpg_key_state: present + +# Used only for RedHat/CentOS/Fedora. +docker_yum_repo_url: https://download.docker.com/linux/{{ (ansible_distribution == "Fedora") | ternary("fedora","centos") }}/docker-{{ docker_edition }}.repo +docker_yum_repo_enable_edge: "0" +docker_yum_repo_enable_test: "0" +docker_yum_gpg_key: https://download.docker.com/linux/centos/gpg + +# A list of users who will be added to the docker group. +docker_users: + - "testuser" + +# Proxy settings. 
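+# Each list below renders into a systemd drop-in as a single Environment=
+# line, e.g. (assuming proxy_env carries the sample values shown in the
+# common role defaults):
+#   Environment="HTTP_PROXY=http://proxy.com:80" "NO_PROXY=localhost,127.0.0.1"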
+docker_daemon_environment_http: + - "HTTP_PROXY={{ proxy_env.http_proxy }}" + - "NO_PROXY={{ proxy_env.no_proxy }}" +docker_daemon_environment_https: + - "HTTPS_PROXY={{ proxy_env.https_proxy }}" + - "NO_PROXY={{ proxy_env.no_proxy }}" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/docker/handlers/main.yaml b/fdio.infra.ansible/roles/docker/handlers/main.yaml new file mode 100644 index 0000000000..d89adb9a1a --- /dev/null +++ b/fdio.infra.ansible/roles/docker/handlers/main.yaml @@ -0,0 +1,9 @@ +--- +# file roles/docker/handlers/main.yaml + +- name: Restart Docker + service: + name: "docker" + state: "{{ docker_restart_handler_state }}" + tags: + - docker-restart-service \ No newline at end of file diff --git a/fdio.infra.ansible/roles/docker/meta/main.yaml b/fdio.infra.ansible/roles/docker/meta/main.yaml new file mode 100644 index 0000000000..ab3d197791 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/meta/main.yaml @@ -0,0 +1,4 @@ +--- +# file: roles/docker/meta/main.yaml + +dependencies: [] \ No newline at end of file diff --git a/fdio.infra.ansible/roles/docker/tasks/main.yaml b/fdio.infra.ansible/roles/docker/tasks/main.yaml new file mode 100644 index 0000000000..5a96b7a7c5 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/tasks/main.yaml @@ -0,0 +1,82 @@ +--- +# file: roles/docker/tasks/main.yaml + +- include_tasks: "{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml" + tags: + - docker-inst-dependencies + +- name: Inst - Docker + package: + name: + - "{{ docker_package }}" + - "{{ docker_package }}-cli" + state: "{{ docker_package_state }}" + tags: + - docker-inst-package + +- name: Conf - Docker Service + service: + name: docker + state: "{{ docker_service_state }}" + enabled: "{{ docker_service_enabled }}" + tags: + - docker-conf-service + +- name: Conf - Docker Service Directory + file: + path: "/etc/systemd/system/docker.service.d" + state: "directory" + tags: + - docker-conf-service + +- name: Conf - Docker Daemon + template: + src: "templates/daemon.json.j2" + dest: "/etc/docker/daemon.json" + owner: "root" + group: "root" + mode: "0644" + when: > + docker_daemon is defined + tags: + - docker-conf-daemon + +- name: Conf - Docker HTTP Proxy + template: + src: "templates/docker.service.proxy.http" + dest: "/etc/systemd/system/docker.service.d/http-proxy.conf" + owner: "root" + group: "root" + mode: "0644" + notify: + - "Restart Docker" + when: > + proxy_env is defined and + proxy_env.http_proxy is defined + tags: + - docker-conf-service + +- name: Conf - Docker HTTPS Proxy + template: + src: "templates/docker.service.proxy.https" + dest: "/etc/systemd/system/docker.service.d/https-proxy.conf" + owner: "root" + group: "root" + mode: "0644" + notify: + - "Restart Docker" + when: > + proxy_env is defined and + proxy_env.https_proxy is defined + tags: + - docker-conf-service + +- name: Conf - Users to Docker Group + user: + name: "{{ item }}" + groups: "docker" + append: True + loop: "{{ docker_users }}" + when: docker_users + tags: + - docker-conf-user diff --git a/fdio.infra.ansible/roles/docker/tasks/ubuntu_bionic.yaml b/fdio.infra.ansible/roles/docker/tasks/ubuntu_bionic.yaml new file mode 100644 index 0000000000..8bda4fed21 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/tasks/ubuntu_bionic.yaml @@ -0,0 +1,30 @@ +--- +# file: roles/docker/tasks/ubuntu_bionic.yaml + +- name: Inst - Dependencies + apt: + name: + - "apt-transport-https" + - "ca-certificates" + - "gpg-agent" + - "software-properties-common" + state: "present" + 
cache_valid_time: 3600 + install_recommends: False + tags: + - docker-inst-dependencies + +- name: Conf - Add APT Key + apt_key: + url: "{{ docker_apt_gpg_key }}" + state: "{{ docker_apt_gpg_key_state }}" + tags: + - docker-conf-apt + +- name: Conf - Install APT Repository + apt_repository: + repo: "{{ docker_apt_repository }}" + state: "{{ docker_apt_repository_state }}" + update_cache: yes + tags: + - docker-conf-apt diff --git a/fdio.infra.ansible/roles/docker/tasks/ubuntu_focal.yaml b/fdio.infra.ansible/roles/docker/tasks/ubuntu_focal.yaml new file mode 100644 index 0000000000..84bd1c5824 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/tasks/ubuntu_focal.yaml @@ -0,0 +1,30 @@ +--- +# file: roles/docker/tasks/ubuntu_focal.yaml + +- name: Inst - Dependencies + apt: + name: + - "apt-transport-https" + - "ca-certificates" + - "gpg-agent" + - "software-properties-common" + state: "present" + cache_valid_time: 3600 + install_recommends: False + tags: + - docker-inst-dependencies + +- name: Conf - Add APT Key + apt_key: + url: "{{ docker_apt_gpg_key }}" + state: "{{ docker_apt_gpg_key_state }}" + tags: + - docker-conf-apt + +- name: Conf - Install APT Repository + apt_repository: + repo: "{{ docker_apt_repository }}" + state: "{{ docker_apt_repository_state }}" + update_cache: yes + tags: + - docker-conf-apt diff --git a/fdio.infra.ansible/roles/docker/templates/daemon.json.j2 b/fdio.infra.ansible/roles/docker/templates/daemon.json.j2 new file mode 100644 index 0000000000..becc2b1af7 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/templates/daemon.json.j2 @@ -0,0 +1 @@ +{{ docker_daemon | to_nice_json }} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.http b/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.http new file mode 100644 index 0000000000..73ceba3870 --- /dev/null +++ b/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.http @@ -0,0 +1,4 @@ +# {{ ansible_managed }} + +[Service] +Environment="{{ docker_daemon_environment_http | join('" "') }}" diff --git a/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.https b/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.https new file mode 100644 index 0000000000..1c2097eb9d --- /dev/null +++ b/fdio.infra.ansible/roles/docker/templates/docker.service.proxy.https @@ -0,0 +1,4 @@ +# {{ ansible_managed }} + +[Service] +Environment="{{ docker_daemon_environment_https | join('" "') }}" diff --git a/fdio.infra.ansible/roles/dpdk/defaults/main.yaml b/fdio.infra.ansible/roles/dpdk/defaults/main.yaml new file mode 100644 index 0000000000..2a8c691728 --- /dev/null +++ b/fdio.infra.ansible/roles/dpdk/defaults/main.yaml @@ -0,0 +1,31 @@ +--- +# file: roles/dpdk/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "libnuma-dev" + focal: + - "build-essential" + - "libnuma-dev" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +dpdk_target_dir: "/opt" +dpdk_version: + - "20.02" +dpdk_url: "https://fast.dpdk.org/rel" +dpdk_build_targets: + "20.02": + aarch64: "arm64-armv8a-linux-gcc" + x86_64: "x86_64-native-linux-gcc" diff --git a/fdio.infra.ansible/roles/dpdk/files/dpdk-mlx5.patch b/fdio.infra.ansible/roles/dpdk/files/dpdk-mlx5.patch new file mode 100644 index 0000000000..a3928d70f7 --- /dev/null +++ 
b/fdio.infra.ansible/roles/dpdk/files/dpdk-mlx5.patch @@ -0,0 +1,19 @@ +diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c +index d7d3bc73c..c21c38485 100644 +--- a/drivers/net/mlx5/mlx5_ethdev.c ++++ b/drivers/net/mlx5/mlx5_ethdev.c +@@ -1032,11 +1032,14 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, + ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX); + dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds & + ETH_LINK_SPEED_FIXED); ++#if 0 ++ /* FIXME: this does not work on Azure w/ CX4-LX */ + if (((dev_link.link_speed && !dev_link.link_status) || + (!dev_link.link_speed && dev_link.link_status))) { + rte_errno = EAGAIN; + return -rte_errno; + } ++#endif + *link = dev_link; + return 0; + } diff --git a/fdio.infra.ansible/roles/dpdk/tasks/main.yaml b/fdio.infra.ansible/roles/dpdk/tasks/main.yaml new file mode 100644 index 0000000000..46f942be93 --- /dev/null +++ b/fdio.infra.ansible/roles/dpdk/tasks/main.yaml @@ -0,0 +1,68 @@ +--- +# file: roles/dpdk/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - dpdk-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - dpdk-inst-prerequisites + +- name: Download Release Archive + get_url: + url: "{{ dpdk_url }}/dpdk-{{ item }}.tar.xz" + dest: "{{ dpdk_target_dir }}/dpdk-{{ item }}.tar.xz" + mode: 0644 + loop: "{{ dpdk_version }}" + register: "dpdk_downloaded" + tags: + - dpdk-inst + +- name: Extract Release Archive + unarchive: + remote_src: true + src: "{{ dpdk_target_dir }}/dpdk-{{ item }}.tar.xz" + dest: "{{ dpdk_target_dir }}/" + creates: "{{ dpdk_target_dir }}/dpdk-{{ item }}" + loop: "{{ dpdk_version }}" + when: "dpdk_downloaded" + register: "dpdk_extracted" + tags: + - dpdk-inst + +- name: Build igb_uio by default + lineinfile: + dest: "{{ dpdk_target_dir }}/dpdk-{{ item }}/config/common_base" + regexp: "^CONFIG_RTE_EAL_IGB_UIO" + line: "CONFIG_RTE_EAL_IGB_UIO=y" + loop: "{{ dpdk_version }}" + when: "dpdk_extracted" + register: "dpdk_configured" + tags: + - dpdk-inst + +- name: Compile Release I + become: yes + command: "make install T={{ dpdk_build_targets[item][ansible_machine] }} DESTDIR={{ dpdk_target_dir }}/dpdk-{{ item }} chdir={{ dpdk_target_dir }}/dpdk-{{ item }}" + loop: "{{ dpdk_version }}" + when: "dpdk_configured" + register: "dpdk_compiled" + tags: + - dpdk-inst + +- name: Link igb_uio Module + shell: "ln -fs {{ dpdk_target_dir }}/dpdk-{{ item }}/{{ dpdk_build_targets[item][ansible_machine] }}/kmod/igb_uio.ko /lib/modules/`uname -r`/igb_uio.ko && depmod -a" + ignore_errors: "yes" + loop: "{{ dpdk_version }}" + when: "dpdk_compiled" + tags: + - dpdk-inst \ No newline at end of file diff --git a/fdio.infra.ansible/roles/iperf/defaults/main.yaml b/fdio.infra.ansible/roles/iperf/defaults/main.yaml new file mode 100644 index 0000000000..07af60b63a --- /dev/null +++ b/fdio.infra.ansible/roles/iperf/defaults/main.yaml @@ -0,0 +1,26 @@ +--- +# file: roles/iperf/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "lib32z1" + focal: + - "build-essential" + - "lib32z1" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +iperf_target_dir: "/opt" +iperf_version: + - "3.7" diff --git 
a/fdio.infra.ansible/roles/iperf/tasks/main.yaml b/fdio.infra.ansible/roles/iperf/tasks/main.yaml new file mode 100644 index 0000000000..f8948cae57 --- /dev/null +++ b/fdio.infra.ansible/roles/iperf/tasks/main.yaml @@ -0,0 +1,62 @@ +--- +# file: roles/iperf/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - iperf-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - iperf-inst-prerequisites + +- name: Get Release Archive + get_url: + url: "https://downloads.es.net/pub/iperf/iperf-{{ item }}.tar.gz" + dest: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz" + validate_certs: false + mode: 0644 + loop: "{{ iperf_version }}" + tags: + - iperf-inst + +- name: Extract Release Archive + unarchive: + remote_src: true + src: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz" + dest: "{{ iperf_target_dir }}/" + creates: "{{ iperf_target_dir }}/iperf-{{ item }}/src" + loop: "{{ iperf_version }}" + tags: + - iperf-inst + +- name: Compile Release I + command: "./configure" + args: + chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/" + loop: "{{ iperf_version }}" + tags: + - iperf-inst + +- name: Compile Release II + command: "make" + args: + chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/" + loop: "{{ iperf_version }}" + tags: + - iperf-inst + +- name: Compile Release III + command: "make install" + args: + chdir: "{{ iperf_target_dir }}/iperf-{{ item }}/" + loop: "{{ iperf_version }}" + tags: + - iperf-inst \ No newline at end of file diff --git a/fdio.infra.ansible/roles/jenkins_job_health_exporter/defaults/main.yaml b/fdio.infra.ansible/roles/jenkins_job_health_exporter/defaults/main.yaml new file mode 100644 index 0000000000..9813d41afb --- /dev/null +++ b/fdio.infra.ansible/roles/jenkins_job_health_exporter/defaults/main.yaml @@ -0,0 +1,35 @@ +--- +# file: roles/jenkins_job_health_exporter/defaults/main.yaml + +# Conf - Jenkins Job Health Exporter. +jenkins_host: "jenkins.fd.io" +poll_interval_sec: 1800 +req_timeout_sec: 30 +bind_to: "0.0.0.0:9186" +last_builds: 10 +jobs: + - "vpp-csit-verify-api-crc-master" + - "vpp-beta-verify-master-ubuntu2004-aarch64" + - "vpp-verify-master-centos8-aarch64" + - "vpp-verify-master-ubuntu1804-aarch64" + - "vpp-gcc-verify-master-ubuntu2004-x86_64" + - "vpp-verify-master-centos8-x86_64" + - "vpp-verify-master-debian10-x86_64" + - "vpp-verify-master-ubuntu2004-x86_64" + - "vpp-verify-master-ubuntu1804-x86_64" + - "vpp-debug-verify-master-ubuntu2004-x86_64" + - "vpp-checkstyle-verify-master-ubuntu2004-x86_64" + - "vpp-sphinx-docs-verify-master-ubuntu1804-x86_64" + - "vpp-docs-verify-master-ubuntu1804-x86_64" + - "vpp-make-test-docs-verify-master-ubuntu1804-x86_64" + - "vpp-csit-verify-device-master-1n-skx" + - "vpp-csit-verify-device-master-1n-tx2" + +# Conf - Service. +jenkins_job_health_exporter_restart_handler_state: "restarted" + +# Inst - System paths. 
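+# (The jenkins-job-health-exporter.j2 template below serializes these settings
+# into /etc/jenkins-job-health-exporter.json, roughly:
+#   { "jenkins_host": "jenkins.fd.io", "poll_interval_sec": 1800,
+#     "req_timeout_sec": 30, "bind_to": "0.0.0.0:9186", "jobs": [...] })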
+jenkins_job_health_exporter_target_dir: "/usr/bin" +jenkins_job_health_exporter_conf_dir: "/etc" +jenkins_job_health_exporter_url: "https://github.com/ayourtch/jenkins-job-health-exporter/releases/download" +jenkins_job_health_exporter_version: "v0.0.3" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/jenkins_job_health_exporter/handlers/main.yaml b/fdio.infra.ansible/roles/jenkins_job_health_exporter/handlers/main.yaml new file mode 100644 index 0000000000..29fee98fed --- /dev/null +++ b/fdio.infra.ansible/roles/jenkins_job_health_exporter/handlers/main.yaml @@ -0,0 +1,9 @@ +--- +# file roles/jenkins_job_health_exporter/handlers/main.yaml + +- name: Restart Jenkins Job Health Exporter + systemd: + daemon_reload: true + enabled: true + name: "jenkins-job-health-exporter" + state: "{{ jenkins_job_health_exporter_restart_handler_state }}" diff --git a/fdio.infra.ansible/roles/jenkins_job_health_exporter/tasks/main.yaml b/fdio.infra.ansible/roles/jenkins_job_health_exporter/tasks/main.yaml new file mode 100644 index 0000000000..5dbe476019 --- /dev/null +++ b/fdio.infra.ansible/roles/jenkins_job_health_exporter/tasks/main.yaml @@ -0,0 +1,38 @@ +--- +# file: roles/jenkins_job_health_exporter/tasks/main.yaml + +- name: Conf - Jenkins Job Health Exporter Config + template: + src: "templates/jenkins-job-health-exporter.j2" + dest: "/etc/jenkins-job-health-exporter.json" + owner: "root" + group: "root" + mode: "0644" + when: + - ansible_hostname == "s42-nomad" + tags: + - conf-jenkins-job-json + +- name: Inst - Jenkins Job Health Exporter Binary + get_url: + url: "{{ jenkins_job_health_exporter_url }}/{{ jenkins_job_health_exporter_version }}/jenkins-job-health-exporter" + dest: "{{ jenkins_job_health_exporter_target_dir }}/jenkins-job-health-exporter" + mode: "0755" + when: + - ansible_hostname == "s42-nomad" + tags: + - inst-jenkins-job-binary + +- name: Inst - Jenkins Job Health Exporter Service + template: + src: "templates/jenkins-job-health-exporter.service.j2" + dest: "/lib/systemd/system/jenkins-job-health-exporter.service" + owner: "root" + group: "root" + mode: "0644" + when: + - ansible_hostname == "s42-nomad" + notify: + - "Restart Jenkins Job Health Exporter" + tags: + - inst-jenkins-job-service diff --git a/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.j2 b/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.j2 new file mode 100644 index 0000000000..5942b782e0 --- /dev/null +++ b/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.j2 @@ -0,0 +1,16 @@ +{ + "jenkins_host": "{{ jenkins_host }}", + "poll_interval_sec": {{ poll_interval_sec }}, + "req_timeout_sec": {{ req_timeout_sec }}, + "bind_to": "{{ bind_to }}", + "last_builds": {{ last_builds }}, + "jobs": [ +{% for item in jobs %} + "{{ item }}" +{%- if not loop.last %}, +{% endif %} +{% endfor %} + + ], + "verbose": 3 +} \ No newline at end of file diff --git a/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.service.j2 b/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.service.j2 new file mode 100644 index 0000000000..38073d0a8c --- /dev/null +++ b/fdio.infra.ansible/roles/jenkins_job_health_exporter/templates/jenkins-job-health-exporter.service.j2 @@ -0,0 +1,13 @@ +[Unit] +Description=Jenkins Job Health Exporter +Documentation=https://github.com/ayourtch/jenkins-job-health-exporter + +[Service] 
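+# Rendered with the defaults above, ExecStart expands to:
+#   /usr/bin/jenkins-job-health-exporter /etc/jenkins-job-health-exporter.json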
+Restart=always +ExecStart={{ jenkins_job_health_exporter_target_dir }}/jenkins-job-health-exporter {{ jenkins_job_health_exporter_conf_dir }}/jenkins-job-health-exporter.json +ExecReload=/bin/kill -HUP $MAINPID +TimeoutStopSec=20s +SendSIGKILL=no + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel/defaults/main.yaml b/fdio.infra.ansible/roles/kernel/defaults/main.yaml new file mode 100644 index 0000000000..b9b4253622 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/defaults/main.yaml @@ -0,0 +1,43 @@ +--- +# file: roles/kernel/defaults/main.yaml + +# Kernel version to install (Default to any version). +kernel_version: "{{ kernel_version_by_distro[ansible_distribution|lower][ansible_distribution_release] | join(' ') }}" + +kernel_version_by_distro: + ubuntu: + bionic: + - "4.15.0-72" + focal: + - "5.4.0-65" + +kernel_packages: "{{ kernel_packages_by_distro[ansible_distribution|lower][ansible_distribution_release] | flatten(levels=1) }}" + +kernel_packages_by_distro: + ubuntu: + bionic: + - "linux-image" + - "linux-headers" + - "linux-modules" + - "linux-modules-extra" + - "linux-tools" + focal: + - "linux-image" + - "linux-headers" + - "linux-modules" + - "linux-modules-extra" + - "linux-tools" + +# Packages to remove in relation to kernel upgrade. +absent_packages: "{{ absent_packages_by_distro[ansible_distribution|lower][ansible_distribution_release] | flatten(levels=1) }}" + +absent_packages_by_distro: + ubuntu: + bionic: + - "amd64-microcode" + - "intel-microcode" + - "iucode-tool" + focal: + - "amd64-microcode" + - "intel-microcode" + - "iucode-tool" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel/filter_plugins/main.py b/fdio.infra.ansible/roles/kernel/filter_plugins/main.py new file mode 100644 index 0000000000..7d909b90e8 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/filter_plugins/main.py @@ -0,0 +1,143 @@ + +"""Extra Ansible filters""" + +def deb_kernel(packages, kernel_version, current_version): + """ + Return best matching kernel version. + Args: + packages (dict): apt-cache showpkg output. + kernel_version (str): Kernel version to install. + current_version (str): Current kernel version. + Returns: + str: kernel version. + """ + kernels = set() + + # List all available kernel version and associated repository + for line in packages['stdout'].splitlines(): + line = line.strip() + if line.startswith('Package: ') and ( + line.endswith('-common') or # Debian + line.endswith('-generic')): # Ubuntu + kernel = line.split()[1] + + for string in ('linux-headers-', 'common', 'generic'): + kernel = kernel.replace(string, '') + kernel = kernel.strip('-') + + if kernel: + kernels.add(kernel) + + # Sort Kernel versions + versions = {} + for kernel in kernels: + try: + version, build = kernel.split('-', 1) + except ValueError: + version = kernel + build = '' + versions[kernel] = list( + int(ver) for ver in version.split('.')) + [build] + kernels = sorted(versions.keys(), key=versions.get, reverse=True) + + # Return more recent kernel package that match version requirement + for kernel in kernels: + if kernel.startswith(kernel_version): + return kernel + + raise RuntimeError( + 'No kernel matching to "%s". Available kernel versions: %s' % ( + kernel_version, ', '.join(reversed(kernels)))) + + +def _deb_kernel_package(kernel, dist, arch, name): + """ + Return kernel package name. + Args: + kernel (str): Kernel version. + dist (str): Distribution. + arch (str): Architecture. 
+ name (str): Package name. + Returns: + str: kernel package. + """ + # Define package suffix + if dist == 'Ubuntu': + suffix = 'generic' + elif name == 'linux-image': + suffix = arch.replace('x86_64', 'amd64') + else: + suffix = 'common' + + return '-'.join((name, kernel, suffix)) + + +def deb_kernel_pkg(packages, kernel_version, current_version, dist, arch, name): + """ + Return kernel package to install. + Args: + packages (dict): apt-cache showpkg output. + kernel_version (str): Kernel version to install. + current_version (str): Current kernel version. + dist (str): Distribution. + arch (str): Architecture. + name (str): Package name. + Returns: + str: kernel package to install. + """ + return _deb_kernel_package( + deb_kernel(packages, kernel_version, current_version), dist, arch, name) + + +def deb_installed_kernel(installed, packages, kernel_version, current_version): + """ + Return old kernel packages to remove. + Args: + installed (dict): dpkg -l output. + packages (dict): apt-cache showpkg output. + kernel_version (str): Kernel version to install. + current_version (str): Current kernel version. + Returns: + list of str: Kernel packages to remove. + """ + # Filter installed package to keep + to_keep = deb_kernel(packages, kernel_version, current_version) + + # Return installed package to remove + to_remove = [] + for line in installed['stdout'].splitlines(): + if ' linux-' not in line: + continue + + package = line.split()[1] + if ((package.startswith('linux-image-') or + package.startswith('linux-headers-')) and not ( + package.startswith('linux-image-' + to_keep) or + package.startswith('linux-headers-' + to_keep))): + to_remove.append(package) + + return to_remove + + +def kernel_match(kernel, kernel_spec): + """ + Check if kernel version match. + Args: + kernel (str): Kernel + kernel_spec (str): Kernel to match. + Returns: + bool: True if Kernel match. 
+ """ + return kernel.startswith(kernel_spec) + + +class FilterModule(object): + """Return filter plugin""" + + @staticmethod + def filters(): + """Return filter""" + return {'deb_kernel': deb_kernel, + 'deb_kernel_pkg': deb_kernel_pkg, + 'deb_installed_kernel': deb_installed_kernel, + 'kernel_match': kernel_match} diff --git a/fdio.infra.ansible/roles/kernel/handlers/main.yaml b/fdio.infra.ansible/roles/kernel/handlers/main.yaml new file mode 100644 index 0000000000..963fd71756 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/handlers/main.yaml @@ -0,0 +1,8 @@ +--- +# file roles/kernel/handlers/main.yaml + +- name: Reboot Server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel/tasks/main.yaml b/fdio.infra.ansible/roles/kernel/tasks/main.yaml new file mode 100644 index 0000000000..431e344fb8 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/tasks/main.yaml @@ -0,0 +1,9 @@ +--- +# file: roles/kernel/tasks/main.yaml + +- name: Inst - Prerequisites + include_tasks: "{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml" + tags: + - kernel-inst-prerequisites + +- meta: flush_handlers diff --git a/fdio.infra.ansible/roles/kernel/tasks/ubuntu_bionic.yaml b/fdio.infra.ansible/roles/kernel/tasks/ubuntu_bionic.yaml new file mode 100644 index 0000000000..349c853c11 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/tasks/ubuntu_bionic.yaml @@ -0,0 +1,51 @@ +--- +# file: roles/kernel/tasks/ubuntu_bionic.yaml + +- name: Get Available Kernel Versions + command: "apt-cache showpkg linux-headers-*" + changed_when: false + register: apt_kernel_list + tags: + - kernel-inst + +- name: Get installed packages with APT + command: "dpkg -l" + changed_when: false + register: apt_packages_list + tags: + - kernel-inst + +- name: Set target APT kernel version + set_fact: + _kernel: "{{ apt_kernel_list | deb_kernel( + kernel_version, ansible_kernel) }}" + tags: + - kernel-inst + +- name: Ensure Packages Versions + apt: + name: "{{ apt_kernel_list | deb_kernel_pkg( + kernel_version, ansible_kernel, ansible_distribution, + ansible_architecture, item) }}" + loop: "{{ kernel_packages }}" + tags: + - kernel-inst + +- name: Ensure Any Other Kernel Packages Are Removed + apt: + name: "{{ apt_packages_list | deb_installed_kernel( + apt_kernel_list, kernel_version, ansible_kernel) }}" + state: absent + purge: true + notify: + - "Reboot Server" + tags: + - kernel-inst + +- name: Ensure Any Microcode Is Absent + apt: + name: "{{ absent_packages }}" + state: absent + purge: true + tags: + - kernel-inst \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel/tasks/ubuntu_focal.yaml b/fdio.infra.ansible/roles/kernel/tasks/ubuntu_focal.yaml new file mode 100644 index 0000000000..9cbc4d4787 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel/tasks/ubuntu_focal.yaml @@ -0,0 +1,51 @@ +--- +# file: roles/kernel/tasks/ubuntu_focal.yaml + +- name: Get Available Kernel Versions + command: "apt-cache showpkg linux-headers-*" + changed_when: false + register: apt_kernel_list + tags: + - kernel-inst + +- name: Get installed packages with APT + command: "dpkg -l" + changed_when: false + register: apt_packages_list + tags: + - kernel-inst + +- name: Set target APT kernel version + set_fact: + _kernel: "{{ apt_kernel_list | deb_kernel( + kernel_version, ansible_kernel) }}" + tags: + - kernel-inst + +- name: Ensure Packages Versions + apt: + name: "{{ apt_kernel_list | deb_kernel_pkg( + kernel_version, ansible_kernel, 
ansible_distribution, + ansible_architecture, item) }}" + loop: "{{ kernel_packages }}" + tags: + - kernel-inst + +- name: Ensure Any Other Kernel Packages Are Removed + apt: + name: "{{ apt_packages_list | deb_installed_kernel( + apt_kernel_list, kernel_version, ansible_kernel) }}" + state: absent + purge: true + notify: + - "Reboot Server" + tags: + - kernel-inst + +- name: Ensure Any Microcode Is Absent + apt: + name: "{{ absent_packages }}" + state: absent + purge: true + tags: + - kernel-inst \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel_vm/files/initramfs_modules b/fdio.infra.ansible/roles/kernel_vm/files/initramfs_modules new file mode 100644 index 0000000000..00ae8e03e7 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel_vm/files/initramfs_modules @@ -0,0 +1,4 @@ +9p +9pnet +9pnet_virtio +vfio-pci \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel_vm/files/initramfs_resume b/fdio.infra.ansible/roles/kernel_vm/files/initramfs_resume new file mode 100644 index 0000000000..820819823b --- /dev/null +++ b/fdio.infra.ansible/roles/kernel_vm/files/initramfs_resume @@ -0,0 +1 @@ +RESUME=none \ No newline at end of file diff --git a/fdio.infra.ansible/roles/kernel_vm/tasks/main.yaml b/fdio.infra.ansible/roles/kernel_vm/tasks/main.yaml new file mode 100644 index 0000000000..4d1b306e64 --- /dev/null +++ b/fdio.infra.ansible/roles/kernel_vm/tasks/main.yaml @@ -0,0 +1,92 @@ +--- +# file: roles/kernel_vm/tasks/main.yaml + +- name: Inst - Backup remote initramfs modules + copy: + src: "/etc/initramfs-tools/modules" + dest: "/tmp/initramfs_modules.bkp" + remote_src: yes + ignore_errors: yes + register: __initramfs_modules_backuped + tags: + - kernel-inst-image + +- name: Inst - Backup remote initramfs resume config + copy: + src: "/etc/initramfs-tools/conf.d/resume" + dest: "/tmp/initramfs-resume.bkp" + remote_src: yes + ignore_errors: yes + register: __initramfs_resume_backuped + tags: + - kernel-inst-image + +- name: Inst - Update remote initramfs modules + copy: + src: "../files/initramfs_modules" + dest: "/etc/initramfs-tools/modules" + tags: + - kernel-inst-image + +- name: Inst - Update remote initramfs resume config + copy: + src: "../files/initramfs_resume" + dest: "/etc/initramfs-tools/conf.d/resume" + tags: + - kernel-inst-image + +- name: Inst - Create target kernel dir + file: + path: "/opt/boot" + state: "directory" + tags: + - kernel-inst-image + +- name: Inst - Build initrd image + shell: "update-initramfs -k {{ ansible_kernel }} -c -b /opt/boot" + tags: + - kernel-inst-image + +- name: Inst - Copy corresponding kernel img + copy: + src: "/boot/vmlinuz-{{ ansible_kernel }}" + dest: "/opt/boot/vmlinuz-{{ ansible_kernel }}" + remote_src: yes + tags: + - kernel-inst-image + +- name: Inst - Restore remote initramfs modules + copy: + src: "/tmp/initramfs_modules.bkp" + dest: "/etc/initramfs-tools/modules" + remote_src: yes + ignore_errors: yes + when: __initramfs_modules_backuped + tags: + - kernel-inst-image + +- name: Inst - Remove remote backup initramfs modules + file: + path: "/tmp/initramfs_modules.bkp" + state: "absent" + when: __initramfs_modules_backuped + tags: + - kernel-inst-image + +- name: Inst - Restore remote initramfs resume config + copy: + src: "/tmp/initramfs-resume.bkp" + dest: "/etc/initramfs-tools/conf.d/resume" + remote_src: yes + ignore_errors: yes + when: __initramfs_resume_backuped + tags: + - kernel-inst-image + +- name: Inst - Remove remote backup initramfs resume config + file: + path: 
"/tmp/initramfs-resume.bkp" + state: "absent" + when: __initramfs_resume_backuped + tags: + - kernel-inst-image diff --git a/fdio.infra.ansible/roles/kubernetes/defaults/main.yaml b/fdio.infra.ansible/roles/kubernetes/defaults/main.yaml new file mode 100644 index 0000000000..1a2f773950 --- /dev/null +++ b/fdio.infra.ansible/roles/kubernetes/defaults/main.yaml @@ -0,0 +1,15 @@ +--- +# file: roles/kubernetes/defaults/main.yaml + +# Version options. +kubernetes_version: "1.11.0-00" +kubernetes_apt_package_state: present + +# Service options. +kubernetes_service_state: started +kubernetes_service_enabled: true +kubernetes_restart_handler_state: restarted + +# APT options. +kubernetes_apt_repository: "deb http://apt.kubernetes.io/ kubernetes-xenial main" +kubernetes_apt_repository_state: present diff --git a/fdio.infra.ansible/roles/kubernetes/tasks/main.yaml b/fdio.infra.ansible/roles/kubernetes/tasks/main.yaml new file mode 100644 index 0000000000..160ffb8c06 --- /dev/null +++ b/fdio.infra.ansible/roles/kubernetes/tasks/main.yaml @@ -0,0 +1,14 @@ +--- +# file: roles/kubernetes/tasks/main.yaml + +- name: Kubernetes - Install distribution - release - machine prerequisites + include_tasks: '{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml' + tags: install-kubernetes + +- name: Kubernetes - Apply kubelet parameter + lineinfile: + path: '/etc/default/kubelet' + state: 'present' + regexp: '^KUBELET_EXTRA_ARGS=*' + line: 'KUBELET_EXTRA_ARGS=--feature-gates HugePages=false' + tags: install-kubernetes diff --git a/fdio.infra.ansible/roles/kubernetes/tasks/ubuntu_bionic.yaml b/fdio.infra.ansible/roles/kubernetes/tasks/ubuntu_bionic.yaml new file mode 100644 index 0000000000..454e80e002 --- /dev/null +++ b/fdio.infra.ansible/roles/kubernetes/tasks/ubuntu_bionic.yaml @@ -0,0 +1,37 @@ +--- +# file: roles/kubernetes/tasks/ubuntu_bionic.yaml + +- name: Kubernetes repository - Dependencies + apt: + name: + - 'apt-transport-https' + - 'ca-certificates' + - 'software-properties-common' + state: 'present' + cache_valid_time: 3600 + install_recommends: False + tags: install-kubernetes + +- name: Kubernetes repository - Add an Apt signing key + apt_key: + url: 'https://packages.cloud.google.com/apt/doc/apt-key.gpg' + state: 'present' + tags: install-kubernetes + +- name: Kubernetes repository - Install APT repository + apt_repository: + repo: '{{ kubernetes_apt_repository }}' + state: '{{ kubernetes_apt_repository_state }}' + update_cache: yes + tags: install-kubernetes + +- name: Kubernetes - Install + apt: + name: + - 'kubernetes-cni=0.6.0-00' + - 'kubeadm={{ kubernetes_version }}' + - 'kubectl={{ kubernetes_version }}' + - 'kubelet={{ kubernetes_version }}' + state: '{{ kubernetes_apt_package_state }}' + force: True + tags: install-kubernetes diff --git a/fdio.infra.ansible/roles/mellanox/defaults/main.yaml b/fdio.infra.ansible/roles/mellanox/defaults/main.yaml new file mode 100644 index 0000000000..0961ec7df6 --- /dev/null +++ b/fdio.infra.ansible/roles/mellanox/defaults/main.yaml @@ -0,0 +1,21 @@ +--- +# file: roles/mellanox/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + - "build-essential" + - "libnl-3-dev" + - "libnl-route-3-dev" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +mellanox_version: "5.2-1.0.4.0" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/mellanox/tasks/main.yaml 
b/fdio.infra.ansible/roles/mellanox/tasks/main.yaml new file mode 100644 index 0000000000..670282923a --- /dev/null +++ b/fdio.infra.ansible/roles/mellanox/tasks/main.yaml @@ -0,0 +1,67 @@ +--- +# file: roles/mellanox/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - mellanox-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - mellanox-inst-prerequisites + +- name: Inst - Check Presence of Mellanox Hardware + shell: "lspci | grep Mellanox | awk '{print $1}'" + register: mellanox_pcis + failed_when: no + changed_when: no + tags: + - mellanox-inst + +- name: Inst - Get OFED + get_url: + url: "http://content.mellanox.com/ofed/MLNX_OFED-{{ mellanox_version }}/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz" + dest: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz" + mode: 0644 + when: mellanox_pcis.stdout_lines | length > 0 + tags: + - mellanox-inst + +- name: Inst - Extract OFED + unarchive: + remote_src: true + src: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}.tgz" + dest: "/opt/" + creates: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}" + register: mellanox_firmware_extracted + when: mellanox_pcis.stdout_lines | length > 0 + tags: + - mellanox-inst + +- name: Inst - OFED + command: "./mlnxofedinstall --with-mft --dpdk --force --upstream-libs" + args: + chdir: "/opt/MLNX_OFED_LINUX-{{ mellanox_version }}-{{ ansible_distribution|lower }}{{ ansible_distribution_version }}-{{ ansible_machine }}" + when: mellanox_pcis.stdout_lines | length > 0 and mellanox_firmware_extracted + tags: + - mellanox-inst + +- name: Switch Infiniband to Ethernet + command: "mlxconfig --yes --dev {{ item }} set LINK_TYPE_P1=2 LINK_TYPE_P2=2" + with_items: "{{ mellanox_pcis.stdout_lines }}" + tags: + - mellanox-conf + +- name: FIX qemu-system removal + package: + name: "qemu-system" + state: latest + tags: + - mellanox-inst diff --git a/fdio.infra.ansible/roles/nomad/defaults/main.yaml b/fdio.infra.ansible/roles/nomad/defaults/main.yaml new file mode 100644 index 0000000000..864890c11e --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/defaults/main.yaml @@ -0,0 +1,105 @@ +--- +# file: roles/nomad/defaults/main.yaml + +# Inst - Prerequisites. +packages: "{{ packages_base + packages_by_distro[ansible_distribution | lower] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "cgroup-bin" + - "curl" + - "git" + - "libcgroup1" + - "unzip" + - "htop" +packages_by_distro: + ubuntu: + - [] +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +# Inst - Download Nomad. +nomad_architecture_map: + amd64: "amd64" + x86_64: "amd64" + armv7l: "arm" + aarch64: "arm64" + 32-bit: "386" + 64-bit: "amd64" +nomad_architecture: "{{ nomad_architecture_map[ansible_architecture] }}" +nomad_version: "0.12.0" +nomad_pkg: "nomad_{{ nomad_version }}_linux_{{ nomad_architecture }}.zip" +nomad_zip_url: "https://releases.hashicorp.com/nomad/{{ nomad_version }}/{{ nomad_pkg }}" + +# Inst - System paths. 
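+# (As with the consul role, on an x86_64 host the download variables above
+# resolve to nomad_pkg "nomad_0.12.0_linux_amd64.zip" fetched from
+# https://releases.hashicorp.com/nomad/0.12.0/nomad_0.12.0_linux_amd64.zip.)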
+nomad_bin_dir: "/usr/local/bin" +nomad_config_dir: "/etc/nomad.d" +nomad_data_dir: "/var/nomad" +nomad_inst_dir: "/opt" +nomad_lockfile: "/var/lock/subsys/nomad" +nomad_run_dir: "/var/run/nomad" +nomad_ssl_dir: "/etc/nomad.d/ssl" + +# Conf - Service. +nomad_node_role: "both" +nomad_restart_handler_state: "restarted" + +# Conf - User and group. +nomad_group: "nomad" +nomad_group_state: "present" +nomad_manage_group: true +nomad_manage_user: true +nomad_user: "nomad" +nomad_user_groups: [ docker, nomad, root ] +nomad_user_state: "present" + +# Conf - base.hcl +nomad_bind_addr: "0.0.0.0" +nomad_datacenter: "dc1" +nomad_disable_update_check: true +nomad_enable_debug: false +nomad_log_level: "INFO" +nomad_name: "{{ inventory_hostname }}" +nomad_region: "global" +nomad_syslog_enable: true + +# Conf - tls.hcl +nomad_ca_file: "{{ nomad_ssl_dir }}/ca.pem" +nomad_cert_file: "{{ nomad_ssl_dir }}/nomad.pem" +nomad_http: false +nomad_key_file: "{{ nomad_ssl_dir }}/nomad-key.pem" +nomad_rpc: false + +# Conf - client.hcl +nomad_certificates: + - src: "{{ vault_nomad_ca_file }}" + dest: "{{ nomad_ca_file }}" + - src: "{{ vault_nomad_cert_file }}" + dest: "{{ nomad_cert_file }}" + - src: "{{ vault_nomad_key_file }}" + dest: "{{ nomad_key_file }}" +nomad_node_class: "" +nomad_no_host_uuid: true +nomad_options: {} +nomad_servers: [] +nomad_volumes: [] + +# Conf - server.hcl +nomad_bootstrap_expect: 2 +nomad_encrypt: "" +nomad_retry_join: true + +# Conf - telemetry.hcl +nomad_disable_hostname: false +nomad_collection_interval: 60s +nomad_use_node_name: false +nomad_publish_allocation_metrics: true +nomad_publish_node_metrics: true +nomad_backwards_compatible_metrics: false +nomad_telemetry_provider_parameters: + prometheus_metrics: true + +# Conf - custom.hcl +# empty diff --git a/fdio.infra.ansible/roles/nomad/handlers/main.yaml b/fdio.infra.ansible/roles/nomad/handlers/main.yaml new file mode 100644 index 0000000000..f0bcee9142 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/handlers/main.yaml @@ -0,0 +1,10 @@ +--- +# file roles/nomad/handlers/main.yaml + +- name: Restart Nomad + systemd: + daemon_reload: true + enabled: true + name: "nomad" + state: "{{ nomad_restart_handler_state }}" + diff --git a/fdio.infra.ansible/roles/nomad/meta/main.yaml b/fdio.infra.ansible/roles/nomad/meta/main.yaml new file mode 100644 index 0000000000..9fc40d9ae1 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/meta/main.yaml @@ -0,0 +1,9 @@ +--- +# file: roles/nomad/meta/main.yaml + +# desc: Install nomad from stable branch and configure service. +# inst: Nomad +# conf: ? 
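+# use: (illustrative sketch; the host group name is an example) +# - hosts: nomad +# roles: +# - role: nomad +# vars: +# nomad_node_role: "both"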
+# info: 1.0 - added role + +dependencies: [ docker ] diff --git a/fdio.infra.ansible/roles/nomad/tasks/main.yaml b/fdio.infra.ansible/roles/nomad/tasks/main.yaml new file mode 100644 index 0000000000..54e80513b8 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/tasks/main.yaml @@ -0,0 +1,192 @@ +--- +# file: roles/nomad/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - nomad-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - nomad-inst-prerequisites + +- name: Conf - Add Nomad Group + group: + name: "{{ nomad_group }}" + state: "{{ nomad_group_state }}" + when: + - nomad_manage_group | bool + tags: + - nomad-conf-user + +- name: Conf - Add Nomad user + user: + name: "{{ nomad_user }}" + group: "{{ nomad_group }}" + groups: "{{ nomad_user_groups }}" + state: "{{ nomad_user_state }}" + system: true + when: + - nomad_manage_user | bool + tags: + - nomad-conf-user + +- name: Inst - Clean Nomad + file: + path: "{{ nomad_inst_dir }}/nomad" + state: "absent" + tags: + - nomad-inst-package + +- name: Inst - Download Nomad + get_url: + url: "{{ nomad_zip_url }}" + dest: "{{ nomad_inst_dir }}/{{ nomad_pkg }}" + tags: + - nomad-inst-package + +- name: Inst - Unarchive Nomad + unarchive: + src: "{{ nomad_inst_dir }}/{{ nomad_pkg }}" + dest: "{{ nomad_inst_dir }}/" + creates: "{{ nomad_inst_dir }}/nomad" + remote_src: true + tags: + - nomad-inst-package + +- name: Inst - Nomad + copy: + src: "{{ nomad_inst_dir }}/nomad" + dest: "{{ nomad_bin_dir }}" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + force: true + mode: 0755 + remote_src: true + tags: + - nomad-inst-package + +- name: Conf - Create Directories "{{ nomad_data_dir }}" + file: + dest: "{{ nomad_data_dir }}" + state: directory + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + tags: + - nomad-conf + +- name: Conf - Create Directories "{{ nomad_ssl_dir }}" + file: + dest: "{{ nomad_ssl_dir }}" + state: directory + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + tags: + - nomad-conf + +- name: Conf - Create Config Directory + file: + dest: "{{ nomad_config_dir }}" + state: directory + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0755 + tags: + - nomad-conf + +- name: Conf - Base Configuration + template: + src: base.hcl.j2 + dest: "{{ nomad_config_dir }}/base.hcl" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + tags: + - nomad-conf + +- name: Conf - Server Configuration + template: + src: server.hcl.j2 + dest: "{{ nomad_config_dir }}/server.hcl" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + when: + - nomad_node_server | bool + tags: + - nomad-conf + +- name: Conf - Client Configuration + template: + src: client.hcl.j2 + dest: "{{ nomad_config_dir }}/client.hcl" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + when: + - nomad_node_client | bool + tags: + - nomad-conf + +- name: Conf - TLS Configuration + template: + src: tls.hcl.j2 + dest: "{{ nomad_config_dir }}/tls.hcl" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + tags: + - nomad-conf + +- name: Conf - Telemetry Configuration + template: + src: telemetry.hcl.j2 + dest: "{{ nomad_config_dir }}/telemetry.hcl" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + tags: + - nomad-conf + +- name: Conf - Custom Configuration + 
template: + src: custom.json.j2 + dest: "{{ nomad_config_dir }}/custom.json" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0644 + when: + - nomad_config_custom is defined + tags: + - nomad-conf + +- name: Conf - Copy Certificates And Keys + copy: + content: "{{ item.src }}" + dest: "{{ item.dest }}" + owner: "{{ nomad_user }}" + group: "{{ nomad_group }}" + mode: 0600 + no_log: true + loop: "{{ nomad_certificates | flatten(levels=1) }}" + tags: + - nomad-conf + +- name: Conf - System.d Script + template: + src: "nomad_systemd.service.j2" + dest: "/lib/systemd/system/nomad.service" + owner: "root" + group: "root" + mode: 0644 +# notify: +# - "Restart Nomad" + tags: + - nomad-conf diff --git a/fdio.infra.ansible/roles/nomad/templates/base.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/base.hcl.j2 new file mode 100644 index 0000000000..7badecf9e0 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/base.hcl.j2 @@ -0,0 +1,11 @@ +name = "{{ nomad_name }}" +region = "{{ nomad_region }}" +datacenter = "{{ nomad_datacenter }}" + +bind_addr = "{{ nomad_bind_addr }}" +data_dir = "{{ nomad_data_dir }}" + +enable_syslog = {{ nomad_syslog_enable | bool | lower }} +enable_debug = {{ nomad_enable_debug | bool | lower }} +disable_update_check = {{ nomad_disable_update_check | bool | lower }} +log_level = "{{ nomad_log_level }}" diff --git a/fdio.infra.ansible/roles/nomad/templates/client.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/client.hcl.j2 new file mode 100644 index 0000000000..f245697a22 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/client.hcl.j2 @@ -0,0 +1,31 @@ +client { + enabled = {{ nomad_node_client | bool | lower }} + no_host_uuid = {{ nomad_no_host_uuid | bool | lower }} + node_class = "{{ nomad_node_class }}" + + {% if nomad_cpu_total_compute is defined -%} + cpu_total_compute = {{ nomad_cpu_total_compute }} + {% endif -%} + + {% if nomad_servers -%} + servers = [ {% for ip_port in nomad_servers -%} "{{ ip_port }}" {% if not loop.last %},{% endif %}{%- endfor -%} ] + {% endif %} + + {% if nomad_options -%} + options = { + {% for key, value in nomad_options.items() %} + "{{ key }}" = "{{ value }}" + {% endfor -%} + } + {% endif %} + + {% if nomad_volumes -%} + {% for volume in nomad_volumes -%} + host_volume "{{ volume.name }}" { + path = "{{ volume.path }}" + read_only = {{ volume.read_only | bool | lower }} + } + {% endfor -%} + {% endif %} + +} diff --git a/fdio.infra.ansible/roles/nomad/templates/custom.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/custom.hcl.j2 new file mode 100644 index 0000000000..37ff6f3496 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/custom.hcl.j2 @@ -0,0 +1,5 @@ +{% if nomad_config_custom -%} +{{ nomad_config_custom | to_nice_json }} +{% else %} +{} +{% endif %} diff --git a/fdio.infra.ansible/roles/nomad/templates/nomad_systemd.service.j2 b/fdio.infra.ansible/roles/nomad/templates/nomad_systemd.service.j2 new file mode 100644 index 0000000000..2a87c65063 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/nomad_systemd.service.j2 @@ -0,0 +1,21 @@ +[Unit] +Description=Nomad Service +Documentation=https://www.nomadproject.io/docs/ +Wants=network-online.target +After=network-online.target + +[Service] +# TODO: Decrease privilege +ExecReload=/bin/kill -SIGHUP $MAINPID +ExecStart={{ nomad_bin_dir }}/nomad agent -config={{ nomad_config_dir }} +KillSignal=SIGTERM +LimitNOFILE=infinity +LimitNPROC=infinity +Restart=on-failure +RestartSec=1 +User=root +Group=root +Environment="GOMAXPROCS=2" + 
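+# Verification after deployment (illustrative commands): +# systemctl status nomad +# journalctl -u nomad --follow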
+[Install] +WantedBy=multi-user.target diff --git a/fdio.infra.ansible/roles/nomad/templates/server.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/server.hcl.j2 new file mode 100644 index 0000000000..b581de9ad0 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/server.hcl.j2 @@ -0,0 +1,16 @@ +server { + enabled = {{ nomad_node_server | bool | lower }} + + {% if nomad_node_server | bool -%} + bootstrap_expect = {{ nomad_bootstrap_expect }} + {%- endif %} + + encrypt = "{{ nomad_encrypt }}" + + {% if nomad_retry_join | bool -%} + server_join { + retry_join = [ {% for ip_port in nomad_retry_servers -%} "{{ ip_port }}" {% if not loop.last %},{% endif %}{%- endfor -%} ] + } + {%- endif %} + +} diff --git a/fdio.infra.ansible/roles/nomad/templates/telemetry.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/telemetry.hcl.j2 new file mode 100644 index 0000000000..256c6999e9 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/telemetry.hcl.j2 @@ -0,0 +1,20 @@ +telemetry { + # Telemetry provider parameters + {% for key, value in nomad_telemetry_provider_parameters.items() -%} + {% if value|bool -%} + {{ key }} = {{ value | bool | lower }} + {% elif value|string or value == "" -%} + {{ key }} = "{{ value }}" + {% else %} + {{ key }} = {{ value }} + {% endif -%} + {% endfor -%} + + # Common parameters + disable_hostname = {{ nomad_disable_hostname | bool | lower }} + collection_interval = "{{ nomad_collection_interval }}" + use_node_name = {{ nomad_use_node_name | bool | lower }} + publish_allocation_metrics = {{ nomad_publish_allocation_metrics | bool | lower }} + publish_node_metrics = {{ nomad_publish_node_metrics | bool | lower }} + backwards_compatible_metrics = {{ nomad_backwards_compatible_metrics | bool | lower }} +} diff --git a/fdio.infra.ansible/roles/nomad/templates/tls.hcl.j2 b/fdio.infra.ansible/roles/nomad/templates/tls.hcl.j2 new file mode 100644 index 0000000000..650765f1b1 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/templates/tls.hcl.j2 @@ -0,0 +1,12 @@ +{% if ( nomad_ca_file ) and + ( nomad_cert_file ) and + ( nomad_key_file ) +%} +tls { + http = {{ nomad_http | bool | lower }} + rpc = {{ nomad_rpc | bool | lower }} + ca_file = "{{ nomad_ca_file }}" + cert_file = "{{ nomad_cert_file }}" + key_file = "{{ nomad_key_file }}" +} +{% endif %} diff --git a/fdio.infra.ansible/roles/nomad/vars/main.yaml b/fdio.infra.ansible/roles/nomad/vars/main.yaml new file mode 100644 index 0000000000..a72222c992 --- /dev/null +++ b/fdio.infra.ansible/roles/nomad/vars/main.yaml @@ -0,0 +1,5 @@ +--- +# file: roles/nomad/vars/main.yaml + +nomad_node_client: "{{ (nomad_node_role == 'client') or (nomad_node_role == 'both') }}" +nomad_node_server: "{{ (nomad_node_role == 'server') or (nomad_node_role == 'both') }}" diff --git a/fdio.infra.ansible/roles/performance_tuning/defaults/main.yaml b/fdio.infra.ansible/roles/performance_tuning/defaults/main.yaml new file mode 100644 index 0000000000..2dad931e92 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/defaults/main.yaml @@ -0,0 +1,20 @@ +--- +# file: roles/performance_tuning/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "cpufrequtils" + +packages_by_distro: + ubuntu: + bionic: + - [] + focal: + - [] + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] \ No newline at end of file diff --git a/fdio.infra.ansible/roles/performance_tuning/files/cpufrequtils 
b/fdio.infra.ansible/roles/performance_tuning/files/cpufrequtils new file mode 100644 index 0000000000..03070fefe1 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/files/cpufrequtils @@ -0,0 +1 @@ +GOVERNOR="performance" diff --git a/fdio.infra.ansible/roles/performance_tuning/files/disable-turbo-boost.service b/fdio.infra.ansible/roles/performance_tuning/files/disable-turbo-boost.service new file mode 100644 index 0000000000..e04729de50 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/files/disable-turbo-boost.service @@ -0,0 +1,10 @@ +[Unit] +Description=Disable Turbo Boost on Intel CPU + +[Service] +ExecStart=/bin/sh -c 'for core in `cat /proc/cpuinfo | grep processor | awk \'{print $3}\'`; do sudo wrmsr -p$core 0x1a0 0x4000850089; done' +ExecStop=/bin/sh -c 'for core in `cat /proc/cpuinfo | grep processor | awk \'{print $3}\'`; do sudo wrmsr -p$core 0x1a0 0x850089; done' +RemainAfterExit=yes + +[Install] +WantedBy=sysinit.target diff --git a/fdio.infra.ansible/roles/performance_tuning/files/irqbalance b/fdio.infra.ansible/roles/performance_tuning/files/irqbalance new file mode 100644 index 0000000000..861be02fb3 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/files/irqbalance @@ -0,0 +1,25 @@ +# irqbalance is a daemon process that distributes interrupts across +# CPUs on SMP systems. The default is to rebalance once every 10 +# seconds. This is the environment file that is specified to systemd via the +# EnvironmentFile key in the service unit file (or via whatever method the init +# system you're using has). +# +# ONESHOT=yes +# after starting, wait for a minute, then look at the interrupt +# load and balance it once; after balancing exit and do not change +# it again. +#IRQBALANCE_ONESHOT= + +# +# IRQBALANCE_BANNED_CPUS +# 64 bit bitmask which allows you to indicate which CPUs should +# be skipped when rebalancing IRQs. CPU numbers which have their +# corresponding bits set to one in this mask will not have any +# IRQs assigned to them on rebalance +# +IRQBALANCE_BANNED_CPUS="{{ ansible_processor_cores | irqbalance_banned_cpu_mask(ansible_processor_count, ansible_processor_threads_per_core) }}" +# +# IRQBALANCE_ARGS +# append any args here to the irqbalance daemon as documented in the man page +# +#IRQBALANCE_ARGS= diff --git a/fdio.infra.ansible/roles/performance_tuning/filter_plugins/main.py b/fdio.infra.ansible/roles/performance_tuning/filter_plugins/main.py new file mode 100644 index 0000000000..d76f6fe166 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/filter_plugins/main.py @@ -0,0 +1,29 @@ +"""Extra Ansible filters""" + +def irqbalance_banned_cpu_mask( + processor_cores, processor_count, processor_threads_per_core): + """ + Return irqbalance CPU mask. + Args: + processor_cores (int): Cores per physical processor (socket). + processor_count (int): Number of physical processors (sockets). + processor_threads_per_core (int): Threads per core. + Returns: + str: irqbalance_banned_cpus.
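+ Example (illustrative only): irqbalance_banned_cpu_mask(2, 1, 2) + clears bits 0 and 2 of a 128-bit all-ones mask and returns + "ffffffff,ffffffff,ffffffff,fffffffa".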
+ """ + mask = int("1" * 128, 2) + + for i in range(processor_count * processor_threads_per_core): + mask &= ~(1 << i * processor_cores) + + import re + return ",".join(re.findall('.{1,8}', str(hex(mask))[2:])) + + +class FilterModule(object): + """Return filter plugin""" + + @staticmethod + def filters(): + """Return filter""" + return {'irqbalance_banned_cpu_mask': irqbalance_banned_cpu_mask} diff --git a/fdio.infra.ansible/roles/performance_tuning/handlers/main.yaml b/fdio.infra.ansible/roles/performance_tuning/handlers/main.yaml new file mode 100644 index 0000000000..fa2876b7ac --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/handlers/main.yaml @@ -0,0 +1,13 @@ +--- +# file roles/performance_tuning/handlers/main.yaml + +- name: Update GRUB + command: update-grub + tags: + - update-grub + +- name: Reboot server + reboot: + reboot_timeout: 3600 + tags: + - reboot-server diff --git a/fdio.infra.ansible/roles/performance_tuning/tasks/main.yaml b/fdio.infra.ansible/roles/performance_tuning/tasks/main.yaml new file mode 100644 index 0000000000..e9cdd0d819 --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/tasks/main.yaml @@ -0,0 +1,189 @@ +--- +# file: roles/performance_tuning/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - perf-inst-prerequisites + +- name: Inst - Machine Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - perf-inst-prerequisites + +- name: Conf - Turbo Boost + import_tasks: turbo_boost.yaml + when: > + cpu_microarchitecture == "skylake" or + cpu_microarchitecture == "cascadelake" + tags: + - perf-conf-turbo-boost + +- name: Conf - Adjust nr_hugepages + # change the minimum size of the hugepage pool. + # 2G VPP, 4GB per VNF/CNF, 2G reserve + sysctl: + name: "vm.nr_hugepages" + value: "{{ sysctl.vm.nr_hugepages }}" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust max_map_count + # this file contains the maximum number of memory map areas a process + # may have. memory map areas are used as a side-effect of calling + # malloc, directly by mmap and mprotect, and also when loading shared + # libraries. + # + # while most applications need less than a thousand maps, certain + # programs, particularly malloc debuggers, may consume lots of them, + # e.g., up to one or two maps per allocation. + # must be greater than or equal to (2 * vm.nr_hugepages). + sysctl: + name: "vm.max_map_count" + value: "{{ sysctl.vm.nr_hugepages * 4 }}" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust hugetlb_shm_group + # hugetlb_shm_group contains group id that is allowed to create sysv + # shared memory segment using hugetlb page. + sysctl: + name: "vm.hugetlb_shm_group" + value: "1000" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust swappiness + # this control is used to define how aggressive the kernel will swap + # memory pages. higher values will increase agressiveness, lower values + # decrease the amount of swap. a value of 0 instructs the kernel not to + # initiate swap until the amount of free and file-backed pages is less + # than the high water mark in a zone. 
+ sysctl: + name: "vm.swappiness" + value: "0" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust shmmax + # shared memory max must be greater than or equal to the total size of hugepages. + # for 2mb pages, totalhugepagesize = vm.nr_hugepages * 2 * 1024 * 1024 + # if the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax) + # is greater than the calculated totalhugepagesize then set this parameter + # to current shmmax value. + sysctl: + name: "kernel.shmmax" + value: "{{ sysctl.vm.nr_hugepages * 2 * 1024 * 1024 }}" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust watchdog_cpumask + # this value can be used to control on which cpus the watchdog may run. + # the default cpumask is all possible cores, but if no_hz_full is + # enabled in the kernel config, and cores are specified with the + # nohz_full= boot argument, those cores are excluded by default. + # offline cores can be included in this mask, and if the core is later + # brought online, the watchdog will be started based on the mask value. + # + # typically this value would only be touched in the nohz_full case + # to re-enable cores that by default were not running the watchdog, + # if a kernel lockup was suspected on those cores. + sysctl: + name: "kernel.watchdog_cpumask" + value: "{{ sysctl.kernel.watchdog_cpumask }}" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Adjust randomize_va_space + # this option can be used to select the type of process address + # space randomization that is used in the system, for architectures + # that support this feature. + # 0 - turn the process address space randomization off. this is the + # default for architectures that do not support this feature anyways, + # and kernels that are booted with the "norandmaps" parameter.
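+ # 1 - make the addresses of mmap base, stack and vdso page randomized. + # 2 - additionally enable heap randomization (the usual kernel default). + # csit sets 0 below to keep benchmark runs reproducible.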
+ sysctl: + name: "kernel.randomize_va_space" + value: "0" + state: "present" + sysctl_file: "/etc/sysctl.d/90-csit.conf" + reload: "yes" + tags: + - perf-conf-sysctl + +- name: Conf - Cpufrequtils + copy: + src: "files/cpufrequtils" + dest: "/etc/default/cpufrequtils" + owner: "root" + group: "root" + mode: 0644 + tags: + - perf-conf-cpufrequtils + +- name: Conf - Irqbalance + template: + src: "files/irqbalance" + dest: "/etc/default/irqbalance" + owner: "root" + group: "root" + mode: 0644 + tags: + - perf-conf-irqbalance + +- name: Conf - Set Ondemand Service To Disable + service: + name: "ondemand" + enabled: "no" + tags: + - perf-conf-ondemand + +- name: Conf - Kernel Parameters + lineinfile: + path: "/etc/default/grub" + state: "present" + regexp: "^GRUB_CMDLINE_LINUX=" + line: "GRUB_CMDLINE_LINUX=\"{% for key, value in grub.items() %}{% if value is sameas true %}{{key}} {% else %}{{key}}={{value}} {% endif %}{% endfor %}\"" + notify: + - "Update GRUB" + tags: + - perf-conf-grub + +- meta: flush_handlers + +- name: Conf - Load Kernel Modules By Default + lineinfile: + path: "/etc/modules" + state: "present" + line: "{{ item }}" + with_items: + - "vfio-pci" + notify: + - "Reboot Server" + tags: + - perf-conf-load-kernel-modules + +- meta: flush_handlers \ No newline at end of file diff --git a/fdio.infra.ansible/roles/performance_tuning/tasks/turbo_boost.yaml b/fdio.infra.ansible/roles/performance_tuning/tasks/turbo_boost.yaml new file mode 100644 index 0000000000..7f69365a2d --- /dev/null +++ b/fdio.infra.ansible/roles/performance_tuning/tasks/turbo_boost.yaml @@ -0,0 +1,44 @@ +--- +# file: roles/performance_tuning/tasks/turbo_boost.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - turbo-inst-prerequisites + +- name: Inst - msr-tools + package: + name: + - "msr-tools" + state: latest + tags: + - turbo-inst-prerequisites + +- name: Conf - Load msr By Default + lineinfile: + path: "/etc/modules" + state: "present" + line: "msr" + tags: + - turbo-conf-msr + +- name: Conf - Custom Startup Service Hook + copy: + src: "files/disable-turbo-boost.service" + dest: "/etc/systemd/system/disable-turbo-boost.service" + owner: "root" + group: "root" + mode: 0644 + tags: + - turbo-conf-msr + +- name: Conf - Custom Startup Service Hook Enable + service: + name: "disable-turbo-boost" + enabled: yes + tags: + - turbo-conf-msr diff --git a/fdio.infra.ansible/roles/prometheus_exporter/defaults/main.yaml b/fdio.infra.ansible/roles/prometheus_exporter/defaults/main.yaml new file mode 100644 index 0000000000..eb2b94cb26 --- /dev/null +++ b/fdio.infra.ansible/roles/prometheus_exporter/defaults/main.yaml @@ -0,0 +1,17 @@ +--- +# file: roles/prometheus_exporter/defaults/main.yaml + +# Inst - Exporters. 
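+# Example resolution (assuming Ubuntu facts): an x86_64 host installs the +# amd64 .deb packages from archive.ubuntu.com, an aarch64 host the arm64 +# builds from ports.ubuntu.com, per the maps below.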
+ne_packages: "{{ ne_packages_by_distro[ansible_distribution | lower][ansible_machine] }}" + +ne_packages_by_distro: + ubuntu: + aarch64: "http://ports.ubuntu.com/pool/universe/p/prometheus-node-exporter/prometheus-node-exporter_1.0.1+ds-1_arm64.deb" + x86_64: "http://archive.ubuntu.com/ubuntu/pool/universe/p/prometheus-node-exporter/prometheus-node-exporter_1.0.1+ds-1_amd64.deb" + +be_packages: "{{ be_packages_by_distro[ansible_distribution | lower][ansible_machine] }}" + +be_packages_by_distro: + ubuntu: + aarch64: "http://ports.ubuntu.com/pool/universe/p/prometheus-blackbox-exporter/prometheus-blackbox-exporter_0.17.0+ds-1_arm64.deb" + x86_64: "http://archive.ubuntu.com/ubuntu/pool/universe/p/prometheus-blackbox-exporter/prometheus-blackbox-exporter_0.17.0+ds-1_amd64.deb" diff --git a/fdio.infra.ansible/roles/prometheus_exporter/files/blackbox.yml b/fdio.infra.ansible/roles/prometheus_exporter/files/blackbox.yml new file mode 100644 index 0000000000..f61c26e1a8 --- /dev/null +++ b/fdio.infra.ansible/roles/prometheus_exporter/files/blackbox.yml @@ -0,0 +1,25 @@ +modules: + http_2xx: + prober: http + timeout: 5s + http: + valid_http_versions: ["HTTP/1.1", "HTTP/2.0"] + no_follow_redirects: false + fail_if_ssl: false + fail_if_not_ssl: true + tls_config: + insecure_skip_verify: false + preferred_ip_protocol: "ip4" + icmp_v4: + prober: icmp + timeout: 5s + icmp: + preferred_ip_protocol: "ip4" + dns_udp: + prober: dns + timeout: 5s + dns: + query_name: "jenkins.fd.io" + query_type: "A" + valid_rcodes: + - NOERROR \ No newline at end of file diff --git a/fdio.infra.ansible/roles/prometheus_exporter/handlers/main.yaml b/fdio.infra.ansible/roles/prometheus_exporter/handlers/main.yaml new file mode 100644 index 0000000000..9c374eaa61 --- /dev/null +++ b/fdio.infra.ansible/roles/prometheus_exporter/handlers/main.yaml @@ -0,0 +1,16 @@ +--- +# file roles/prometheus_exporter/handlers/main.yaml + +- name: Restart Prometheus Node Exporter + systemd: + daemon_reload: true + enabled: true + name: "prometheus-node-exporter" + state: "restarted" + +- name: Restart Prometheus Blackbox Exporter + systemd: + daemon_reload: true + enabled: true + name: "prometheus-blackbox-exporter" + state: "restarted" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/prometheus_exporter/tasks/main.yaml b/fdio.infra.ansible/roles/prometheus_exporter/tasks/main.yaml new file mode 100644 index 0000000000..b38215c4a2 --- /dev/null +++ b/fdio.infra.ansible/roles/prometheus_exporter/tasks/main.yaml @@ -0,0 +1,15 @@ +--- +# file: roles/prometheus_exporter/tasks/main.yaml + +- include_tasks: "{{ ansible_distribution|lower }}_{{ ansible_distribution_release }}.yaml" + tags: + - prometheus-inst + +- name: Conf - Prometheus Blackbox Exporter + copy: + src: 'files/blackbox.yml' + dest: '/etc/prometheus/blackbox.yml' + notify: + - "Restart Prometheus Blackbox Exporter" + tags: + - prometheus-conf-blackbox-exporter \ No newline at end of file diff --git a/fdio.infra.ansible/roles/prometheus_exporter/tasks/ubuntu_bionic.yaml b/fdio.infra.ansible/roles/prometheus_exporter/tasks/ubuntu_bionic.yaml new file mode 100644 index 0000000000..566753e272 --- /dev/null +++ b/fdio.infra.ansible/roles/prometheus_exporter/tasks/ubuntu_bionic.yaml @@ -0,0 +1,33 @@ +--- +# file: roles/prometheus_exporter/tasks/ubuntu_bionic.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + tags: + - prometheus-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "init-system-helpers" + 
default_release: "bionic-backports" + state: latest + tags: + - prometheus-inst-prerequisites + +- name: Inst - Prometheus Node Exporter + apt: + deb: "{{ ne_packages }}" + notify: + - "Restart Prometheus Node Exporter" + tags: + - prometheus-inst-node-exporter + +- name: Inst - Prometheus Blackbox Exporter + apt: + deb: "{{ be_packages }}" + notify: + - "Restart Prometheus Blackbox Exporter" + tags: + - prometheus-inst-blackbox-exporter \ No newline at end of file diff --git a/fdio.infra.ansible/roles/python_env/defaults/main.yaml b/fdio.infra.ansible/roles/python_env/defaults/main.yaml new file mode 100644 index 0000000000..342d1c3d33 --- /dev/null +++ b/fdio.infra.ansible/roles/python_env/defaults/main.yaml @@ -0,0 +1,41 @@ +--- +# file: roles/common/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "virtualenv" + +packages_by_distro: + ubuntu: + bionic: + - "python-all" + - "python-apt" + - "python-cffi" + - "python-cffi-backend" + - "python-dev" + - "python-pip" + - "python-setuptools" + - "python3-all" + - "python3-apt" + - "python3-cffi" + - "python3-cffi-backend" + - "python3-dev" + - "python3-pip" + - "python3-pyelftools" + - "python3-setuptools" + focal: + - "python3-all" + - "python3-apt" + - "python3-cffi" + - "python3-cffi-backend" + - "python3-dev" + - "python3-pip" + - "python3-pyelftools" + - "python3-setuptools" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] \ No newline at end of file diff --git a/fdio.infra.ansible/roles/python_env/tasks/main.yaml b/fdio.infra.ansible/roles/python_env/tasks/main.yaml new file mode 100644 index 0000000000..cddfe63655 --- /dev/null +++ b/fdio.infra.ansible/roles/python_env/tasks/main.yaml @@ -0,0 +1,82 @@ +--- +# file: roles/python_env/tasks/main.yaml + +- name: Inst - Update package cache (apt) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - common-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - common-inst-prerequisites + +- name: Inst - CSIT PIP requirements + pip: + name: + - "ecdsa==0.13.3" + - "paramiko==2.6.0" + - "pycrypto==2.6.1" + - "pypcap==1.2.3" + - "PyYAML==5.1.1" + - "requests==2.22.0" + - "robotframework==3.1.2" + - "scapy==2.4.3" + - "scp==0.13.2" + - "ansible==2.10.7" + - "dill==0.3.3" + - "numpy==1.17.3" + - "hdrhistogram==0.6.1" + - "plotly==4.1.1" + - "PTable==0.9.2" + - "Sphinx==2.2.1" + - "sphinx-rtd-theme==0.4.0" + - "sphinxcontrib-programoutput==0.15" + - "sphinxcontrib-robotdoc==0.11.0" + - "alabaster==0.7.12" + - "Babel==2.7.0" + - "bcrypt==3.1.7" + - "certifi==2019.9.11" + - "cffi==1.13.2" + - "chardet==3.0.4" + - "cryptography==2.8" + - "docutils==0.15.2" + - "future==0.18.2" + - "idna==2.8" + - "imagesize==1.1.0" + - "Jinja2==2.10.3" + - "MarkupSafe==1.1.1" + - "packaging==19.2" + - "pbr==5.4.3" + - "pycparser==2.19" + - "Pygments==2.4.2" + - "PyNaCl==1.3.0" + - "pyparsing==2.4.4" + - "python-dateutil==2.8.1" + - "pytz==2019.3" + - "retrying==1.3.3" + - "six==1.13.0" + - "snowballstemmer==2.0.0" + - "sphinxcontrib-applehelp==1.0.1" + - "sphinxcontrib-devhelp==1.0.1" + - "sphinxcontrib-htmlhelp==1.0.2" + - "sphinxcontrib-jsmath==1.0.1" + - "sphinxcontrib-qthelp==1.0.2" + - "sphinxcontrib-serializinghtml==1.1.3" + - "urllib3==1.25.6" + tags: + - common-inst-pip + +- name: Inst - CSIT PIP requirements - Pandas and 
SciPy workaround + pip: + name: + - "pandas==0.25.3" + - "scipy==1.5.4" + tags: + - common-inst-pip diff --git a/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.service b/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.service new file mode 100644 index 0000000000..11911201d5 --- /dev/null +++ b/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.service @@ -0,0 +1,12 @@ +[Unit] +Description=CSIT Initialize Docker TG +After=network.target + +[Service] +Type=oneshot +RemainAfterExit=True +ExecStart=/usr/local/bin/csit-initialize-docker-tg.sh start 2 +ExecStop=/usr/local/bin/csit-initialize-docker-tg.sh stop + +[Install] +WantedBy=default.target diff --git a/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.sh b/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.sh new file mode 100755 index 0000000000..0120795e9c --- /dev/null +++ b/fdio.infra.ansible/roles/tg/files/csit-initialize-docker-tg.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# CSIT TG Docker container initialization. + +set -euo pipefail + +case "${1:-start}" in + "start" ) + # Run TG + for cnt in $(seq 1 ${2:-1}); do + docker network create --driver bridge csit-nw-tg${cnt} || true + # If the IMAGE is not already loaded then docker run will pull the + # IMAGE, and all image dependencies, before it starts the container. + dcr_image="csit_sut-ubuntu2004:local" + # Run the container in the background and print the new container + # ID. + dcr_stc_params="--detach=true " + # Give extended privileges to this container. A "privileged" + # container is given access to all devices and able to run nested + # containers. + dcr_stc_params+="--privileged " + # Publish container SSH port 2222 to port 600${cnt} on the host. + dcr_stc_params+="--publish 600${cnt}:2222 " + # Automatically remove the container when it exits. + dcr_stc_params+="--rm " + # Size of /dev/shm. + dcr_stc_params+="--shm-size 4G " + # Mount /dev to be able to see newly bound interfaces. We cannot + # use --device=/dev/vfio as this does not see newly bound + # interfaces. + dcr_stc_params+="--volume /dev:/dev " + # Mount /opt/boot/ where VM kernel and initrd are located. + dcr_stc_params+="--volume /opt:/opt " + # Mount host hugepages for VMs.
+ dcr_stc_params+="--volume /dev/hugepages:/dev/hugepages " + + params=(${dcr_stc_params} --name csit-tg-"${cnt}" "${dcr_image}") + docker run --network=csit-nw-tg${cnt} "${params[@]}" + done + ;; + "stop" ) + docker rm --force $(docker ps --all --quiet --filter name=csit) + docker network rm $(docker network ls --filter name=csit --quiet) + ;; +esac diff --git a/fdio.infra.ansible/roles/tg/handlers/main.yaml b/fdio.infra.ansible/roles/tg/handlers/main.yaml new file mode 100644 index 0000000000..e13e67b2c1 --- /dev/null +++ b/fdio.infra.ansible/roles/tg/handlers/main.yaml @@ -0,0 +1,10 @@ +--- +# file: roles/tg/handlers/main.yaml + +- name: Start csit-initialize-docker-tg.service + systemd: + enabled: yes + state: "started" + name: "csit-initialize-docker-tg.service" + tags: + - docker-tg diff --git a/fdio.infra.ansible/roles/tg/tasks/main.yaml b/fdio.infra.ansible/roles/tg/tasks/main.yaml new file mode 100644 index 0000000000..4e79dabfab --- /dev/null +++ b/fdio.infra.ansible/roles/tg/tasks/main.yaml @@ -0,0 +1,30 @@ +--- +# file: roles/tg/tasks/main.yaml + +- name: Conf - csit-initialize-docker-tg.sh + copy: + src: "files/csit-initialize-docker-tg.sh" + dest: "/usr/local/bin/csit-initialize-docker-tg.sh" + owner: "root" + group: "root" + mode: 0744 + when: + - docker_tg is defined + tags: + - tg-conf-docker + +- name: Conf - Start csit-initialize-docker-tg.service + copy: + src: "files/csit-initialize-docker-tg.service" + dest: "/etc/systemd/system/" + owner: "root" + group: "root" + mode: 0644 + notify: + - "Start csit-initialize-docker-tg.service" + when: + - docker_tg is defined + tags: + - tg-conf-docker + +- meta: flush_handlers diff --git a/fdio.infra.ansible/roles/topology/tasks/main.yaml b/fdio.infra.ansible/roles/topology/tasks/main.yaml new file mode 100644 index 0000000000..cf3eb5367f --- /dev/null +++ b/fdio.infra.ansible/roles/topology/tasks/main.yaml @@ -0,0 +1,9 @@ +--- +# file: roles/topology/tasks/main.yaml + +- name: Create topology file + template: + src: "templates/topology_{{ cloud_topology }}.j2" + dest: "../../../../topologies/available/{{ cloud_topology }}_{{ testbed_name }}.yaml" + tags: + - create-topology-file diff --git a/fdio.infra.ansible/roles/topology/templates/topology_2n_aws_c5n.j2 b/fdio.infra.ansible/roles/topology/templates/topology_2n_aws_c5n.j2 new file mode 100644 index 0000000000..1d99a34994 --- /dev/null +++ b/fdio.infra.ansible/roles/topology/templates/topology_2n_aws_c5n.j2 @@ -0,0 +1,56 @@ +--- +metadata: + version: 0.1 + schema: + - resources/topology_schemas/2_node_topology.sch.yaml + - resources/topology_schemas/topology.sch.yaml + tags: [hw, 2-node] + +nodes: + TG: + type: TG + subtype: TREX + host: "{{ tg_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + interfaces: + port1: + # tg_instance/p1 - 50GE port1 on ENA NIC. + mac_address: {{ tg_if1_mac }} + pci_address: "0000:00:06.0" + link: link1 + model: Amazon-Nitro-50G + port2: + # tg_instance/p2 - 50GE port2 on ENA NIC. + mac_address: {{ tg_if2_mac }} + pci_address: "0000:00:07.0" + link: link2 + model: Amazon-Nitro-50G + DUT1: + type: DUT + host: "{{ dut1_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + uio_driver: vfio-pci + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + interfaces: + port1: + # dut1_instance/p1 - 50GE port1 on ENA NIC. 
+ mac_address: {{ dut1_if1_mac }} + pci_address: "0000:00:06.0" + link: link1 + model: Amazon-Nitro-50G + port2: + # dut1_instance/p2 - 50GE port2 on ENA NIC. + mac_address: {{ dut1_if2_mac }} + pci_address: "0000:00:07.0" + link: link2 + model: Amazon-Nitro-50G diff --git a/fdio.infra.ansible/roles/topology/templates/topology_3n_aws_c5n.j2 b/fdio.infra.ansible/roles/topology/templates/topology_3n_aws_c5n.j2 new file mode 100644 index 0000000000..631b0be63b --- /dev/null +++ b/fdio.infra.ansible/roles/topology/templates/topology_3n_aws_c5n.j2 @@ -0,0 +1,83 @@ +--- +metadata: + version: 0.1 + schema: + - resources/topology_schemas/3_node_topology.sch.yaml + - resources/topology_schemas/topology.sch.yaml + tags: [hw, 3-node] + +nodes: + TG: + type: TG + subtype: TREX + host: "{{ tg_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + interfaces: + port1: + # tg_instance/p1 - 50GE port1 on ENA NIC. + mac_address: {{ tg_if1_mac }} + pci_address: "0000:00:06.0" + link: link1 + model: Amazon-Nitro-50G + port2: + # tg_instance/p2 - 50GE port2 on ENA NIC. + mac_address: {{ tg_if2_mac }} + pci_address: "0000:00:07.0" + link: link2 + model: Amazon-Nitro-50G + DUT1: + type: DUT + host: "{{ dut1_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + uio_driver: vfio-pci + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + interfaces: + port1: + # dut1_instance/p1 - 50GE port1 on ENA NIC. + mac_address: {{ dut1_if1_mac }} + pci_address: "0000:00:06.0" + link: link1 + model: Amazon-Nitro-50G + port2: + # dut1_instance/p2 - 50GE port2 on ENA NIC. + mac_address: {{ dut1_if2_mac }} + pci_address: "0000:00:07.0" + link: link21 + model: Amazon-Nitro-50G + DUT2: + type: DUT + host: "{{ dut2_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + uio_driver: vfio-pci + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + interfaces: + port1: + # dut2_instance/p1 - 50GE port1 on ENA NIC. + mac_address: {{ dut2_if1_mac }} + pci_address: "0000:00:06.0" + link: link21 + model: Amazon-Nitro-50G + port2: + # dut2_instance/p2 - 50GE port1 on ENA NIC. + mac_address: {{ dut2_if2_mac }} + pci_address: "0000:00:07.0" + link: link2 + model: Amazon-Nitro-50G + diff --git a/fdio.infra.ansible/roles/topology/templates/topology_3n_azure_Fsv2.j2 b/fdio.infra.ansible/roles/topology/templates/topology_3n_azure_Fsv2.j2 new file mode 100644 index 0000000000..e4dd6cdbf2 --- /dev/null +++ b/fdio.infra.ansible/roles/topology/templates/topology_3n_azure_Fsv2.j2 @@ -0,0 +1,82 @@ +--- +metadata: + version: 0.1 + schema: + - resources/topology_schemas/3_node_topology.sch.yaml + - resources/topology_schemas/topology.sch.yaml + tags: [hw, 3-node] + +nodes: + TG: + type: TG + subtype: TREX + host: "{{ tg_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + interfaces: + port1: + # tg_instance/p1 - 40GE port1 on Mellanox NIC. + mac_address: "{{ tg_if1_mac | lower | replace('-',':') }}" + pci_address: "0002:00:02.0" + link: link1 + model: Azure-MLX-40G + port2: + # tg_instance/p2 - 40GE port2 on Mellanox NIC. 
+ mac_address: "{{ tg_if2_mac | lower | replace('-',':') }}" + pci_address: "0003:00:02.0" + link: link2 + model: Azure-MLX-40G + DUT1: + type: DUT + host: "{{ dut1_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + uio_driver: vfio-pci + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + interfaces: + port1: + # dut1_instance/p1 - 40GE port1 on Mellanox NIC. + mac_address: "{{ dut1_if1_mac | lower | replace('-',':') }}" + pci_address: "0002:00:02.0" + link: link1 + model: Azure-MLX-40G + port2: + # dut2_instance/p1 - 40GE port2 on Mellanox NIC. + mac_address: "{{ dut1_if2_mac | lower | replace('-',':') }}" + pci_address: "0003:00:02.0" + link: link21 + model: Azure-MLX-40G + DUT2: + type: DUT + host: "{{ dut2_public_ip }}" + arch: x86_64 + port: 22 + username: testuser + password: Csit1234 + uio_driver: vfio-pci + honeycomb: + user: admin + passwd: admin + port: 8183 + netconf_port: 2831 + interfaces: + port1: + # dut1_instance/p1 - 40GE port1 on Mellanox NIC. + mac_address: "{{ dut2_if1_mac | lower | replace('-',':') }}" + pci_address: "0002:00:02.0" + link: link21 + model: Azure-MLX-40G + port2: + # dut2_instance/p1 - 40GE port2 on Mellanox NIC. + mac_address: "{{ dut2_if2_mac | lower | replace('-',':') }}" + pci_address: "0003:00:02.0" + link: link2 + model: Azure-MLX-40G diff --git a/fdio.infra.ansible/roles/trex/defaults/main.yaml b/fdio.infra.ansible/roles/trex/defaults/main.yaml new file mode 100644 index 0000000000..19bb15e9d3 --- /dev/null +++ b/fdio.infra.ansible/roles/trex/defaults/main.yaml @@ -0,0 +1,44 @@ +--- +# file: roles/trex/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - [] + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "libmnl-dev" + - "libnuma-dev" + - "libpcap-dev" + - "librdmacm-dev" + - "librdmacm1" + - "libssl-dev" + - "pciutils" + - "python3-pip" + - "zlib1g-dev" + focal: + - "build-essential" + - "libmnl-dev" + - "libnuma-dev" + - "libpcap-dev" + - "librdmacm-dev" + - "librdmacm1" + - "libssl-dev" + - "pciutils" + - "python3-pip" + - "zlib1g-dev" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] + +trex_target_dir: "/opt" +trex_url: "https://github.com/cisco-system-traffic-generator/trex-core/archive/" +trex_version: + # master // ubuntu 20.04 + - "2.88" \ No newline at end of file diff --git a/fdio.infra.ansible/roles/trex/files/t-rex.patch b/fdio.infra.ansible/roles/trex/files/t-rex.patch new file mode 100644 index 0000000000..e7db647779 --- /dev/null +++ b/fdio.infra.ansible/roles/trex/files/t-rex.patch @@ -0,0 +1,548 @@ +diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py +index e8d0cd51..a0c01adb 100755 +--- a/linux_dpdk/ws_main.py ++++ b/linux_dpdk/ws_main.py +@@ -209,7 +209,7 @@ def check_ofed(ctx): + + ofed_ver= 42 + ofed_ver_show= '4.2' +- ++ return True + if not os.path.isfile(ofed_info): + ctx.end_msg('not found', 'YELLOW') + return False +@@ -1552,8 +1552,6 @@ class build_option: + flags += ['-DNDEBUG']; + else: + flags += ['-UNDEBUG']; +- if bld.env.OFED_OK: +- flags += ['-DHAVE_IBV_MLX4_WQE_LSO_SEG=1'] + return (flags) + + def get_bnxt_flags(self): +diff --git a/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h b/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h +index b3d68683..35474409 100644 +--- a/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h ++++ b/src/dpdk/drivers/net/mlx4/mlx4_autoconf.h +@@ -1,3 +1,6 @@ +-#ifndef 
HAVE_IBV_MLX4_WQE_LSO_SEG +-#define HAVE_IBV_MLX4_WQE_LSO_SEG +-#endif ++/* HAVE_IBV_MLX4_BUF_ALLOCATORS is not defined. */ ++ ++/* HAVE_IBV_MLX4_UAR_MMAP_OFFSET is not defined. */ ++ ++/* HAVE_IBV_MLX4_WQE_LSO_SEG is not defined. */ ++ +diff --git a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h +index 8770fdde..75db5ae8 100644 +--- a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h ++++ b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h +@@ -1,54 +1,362 @@ +-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT +-#define HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT +-#endif ++/* HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT is not defined. */ + +-#ifndef HAVE_IBV_FLOW_DV_SUPPORT +-#define HAVE_IBV_FLOW_DV_SUPPORT +-#endif ++#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT ++#define HAVE_IBV_DEVICE_TUNNEL_SUPPORT 1 ++#endif /* HAVE_IBV_DEVICE_TUNNEL_SUPPORT */ + +-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45 +-#define HAVE_IBV_DEVICE_COUNTERS_SET_V45 +-#endif ++/* HAVE_IBV_DEVICE_MPLS_SUPPORT is not defined. */ + +-#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS +-#define HAVE_IBV_FLOW_DEVX_COUNTERS +-#endif ++#ifndef HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING ++#define HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING 1 ++#endif /* HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING */ + +-#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG +-#define HAVE_IBV_MLX4_WQE_LSO_SEG +-#endif ++/* HAVE_IBV_WQ_FLAG_RX_END_PADDING is not defined. */ + ++#ifndef HAVE_IBV_MLX5_MOD_SWP ++#define HAVE_IBV_MLX5_MOD_SWP 1 ++#endif /* HAVE_IBV_MLX5_MOD_SWP */ + +-#ifdef SUPPORTED_40000baseKR4_Full ++#ifndef HAVE_IBV_MLX5_MOD_MPW ++#define HAVE_IBV_MLX5_MOD_MPW 1 ++#endif /* HAVE_IBV_MLX5_MOD_MPW */ ++ ++#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP ++#define HAVE_IBV_MLX5_MOD_CQE_128B_COMP 1 ++#endif /* HAVE_IBV_MLX5_MOD_CQE_128B_COMP */ ++ ++#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_PAD ++#define HAVE_IBV_MLX5_MOD_CQE_128B_PAD 1 ++#endif /* HAVE_IBV_MLX5_MOD_CQE_128B_PAD */ ++ ++/* HAVE_IBV_FLOW_DV_SUPPORT is not defined. */ ++ ++/* HAVE_MLX5DV_DR is not defined. */ ++ ++/* HAVE_MLX5DV_DR_ESWITCH is not defined. */ ++ ++/* HAVE_IBV_DEVX_OBJ is not defined. */ ++ ++/* HAVE_IBV_FLOW_DEVX_COUNTERS is not defined. */ ++ ++#ifndef HAVE_ETHTOOL_LINK_MODE_25G ++#define HAVE_ETHTOOL_LINK_MODE_25G 1 ++#endif /* HAVE_ETHTOOL_LINK_MODE_25G */ ++ ++#ifndef HAVE_ETHTOOL_LINK_MODE_50G ++#define HAVE_ETHTOOL_LINK_MODE_50G 1 ++#endif /* HAVE_ETHTOOL_LINK_MODE_50G */ ++ ++#ifndef HAVE_ETHTOOL_LINK_MODE_100G ++#define HAVE_ETHTOOL_LINK_MODE_100G 1 ++#endif /* HAVE_ETHTOOL_LINK_MODE_100G */ ++ ++/* HAVE_IBV_DEVICE_COUNTERS_SET_V42 is not defined. */ ++ ++/* HAVE_IBV_DEVICE_COUNTERS_SET_V45 is not defined. */ ++ ++#ifndef HAVE_RDMA_NL_NLDEV ++#define HAVE_RDMA_NL_NLDEV 1 ++#endif /* HAVE_RDMA_NL_NLDEV */ ++ ++#ifndef HAVE_RDMA_NLDEV_CMD_GET ++#define HAVE_RDMA_NLDEV_CMD_GET 1 ++#endif /* HAVE_RDMA_NLDEV_CMD_GET */ ++ ++#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET ++#define HAVE_RDMA_NLDEV_CMD_PORT_GET 1 ++#endif /* HAVE_RDMA_NLDEV_CMD_PORT_GET */ ++ ++#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX ++#define HAVE_RDMA_NLDEV_ATTR_DEV_INDEX 1 ++#endif /* HAVE_RDMA_NLDEV_ATTR_DEV_INDEX */ ++ ++#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME ++#define HAVE_RDMA_NLDEV_ATTR_DEV_NAME 1 ++#endif /* HAVE_RDMA_NLDEV_ATTR_DEV_NAME */ ++ ++#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX ++#define HAVE_RDMA_NLDEV_ATTR_PORT_INDEX 1 ++#endif /* HAVE_RDMA_NLDEV_ATTR_PORT_INDEX */ ++ ++/* HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX is not defined. 
*/ ++ ++#ifndef HAVE_IFLA_NUM_VF ++#define HAVE_IFLA_NUM_VF 1 ++#endif /* HAVE_IFLA_NUM_VF */ ++ ++#ifndef HAVE_IFLA_EXT_MASK ++#define HAVE_IFLA_EXT_MASK 1 ++#endif /* HAVE_IFLA_EXT_MASK */ ++ ++#ifndef HAVE_IFLA_PHYS_SWITCH_ID ++#define HAVE_IFLA_PHYS_SWITCH_ID 1 ++#endif /* HAVE_IFLA_PHYS_SWITCH_ID */ ++ ++#ifndef HAVE_IFLA_PHYS_PORT_NAME ++#define HAVE_IFLA_PHYS_PORT_NAME 1 ++#endif /* HAVE_IFLA_PHYS_PORT_NAME */ ++ ++#ifndef HAVE_IFLA_VXLAN_COLLECT_METADATA ++#define HAVE_IFLA_VXLAN_COLLECT_METADATA 1 ++#endif /* HAVE_IFLA_VXLAN_COLLECT_METADATA */ ++ ++#ifndef HAVE_TCA_CHAIN ++#define HAVE_TCA_CHAIN 1 ++#endif /* HAVE_TCA_CHAIN */ ++ ++#ifndef HAVE_TCA_FLOWER_ACT ++#define HAVE_TCA_FLOWER_ACT 1 ++#endif /* HAVE_TCA_FLOWER_ACT */ ++ ++#ifndef HAVE_TCA_FLOWER_FLAGS ++#define HAVE_TCA_FLOWER_FLAGS 1 ++#endif /* HAVE_TCA_FLOWER_FLAGS */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE ++#define HAVE_TCA_FLOWER_KEY_ETH_TYPE 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ETH_TYPE */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST ++#define HAVE_TCA_FLOWER_KEY_ETH_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ETH_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_ETH_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ETH_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC ++#define HAVE_TCA_FLOWER_KEY_ETH_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ETH_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO ++#define HAVE_TCA_FLOWER_KEY_IP_PROTO 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IP_PROTO */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC ++#define HAVE_TCA_FLOWER_KEY_IPV4_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST ++#define HAVE_TCA_FLOWER_KEY_IPV4_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC ++#define HAVE_TCA_FLOWER_KEY_IPV6_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST ++#define HAVE_TCA_FLOWER_KEY_IPV6_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC ++#define HAVE_TCA_FLOWER_KEY_TCP_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST ++#define HAVE_TCA_FLOWER_KEY_TCP_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_TCP_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC ++#define HAVE_TCA_FLOWER_KEY_UDP_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_UDP_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST ++#define 
HAVE_TCA_FLOWER_KEY_UDP_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_UDP_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_UDP_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_UDP_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID ++#define HAVE_TCA_FLOWER_KEY_VLAN_ID 1 ++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_ID */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO ++#define HAVE_TCA_FLOWER_KEY_VLAN_PRIO 1 ++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_PRIO */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE ++#define HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE 1 ++#endif /* HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS ++#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_FLAGS */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK ++#define HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_TCP_FLAGS_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS ++#define HAVE_TCA_FLOWER_KEY_IP_TOS 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IP_TOS */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IP_TOS_MASK ++#define HAVE_TCA_FLOWER_KEY_IP_TOS_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IP_TOS_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL ++#define HAVE_TCA_FLOWER_KEY_IP_TTL 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IP_TTL */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_IP_TTL_MASK ++#define HAVE_TCA_FLOWER_KEY_IP_TTL_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_IP_TTL_MASK */ ++ ++#ifndef HAVE_TC_ACT_GOTO_CHAIN ++#define HAVE_TC_ACT_GOTO_CHAIN 1 ++#endif /* HAVE_TC_ACT_GOTO_CHAIN */ ++ ++#ifndef HAVE_TC_ACT_VLAN ++#define HAVE_TC_ACT_VLAN 1 ++#endif /* HAVE_TC_ACT_VLAN */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_KEY_ID ++#define HAVE_TCA_FLOWER_KEY_ENC_KEY_ID 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_KEY_ID */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV4_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_IPV6_DST_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT ++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT ++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT */ ++ ++#ifndef HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK ++#define HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK 1 ++#endif /* HAVE_TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK */ ++ ++/* 
HAVE_TCA_FLOWER_KEY_ENC_IP_TOS is not defined. */ ++ ++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TOS_MASK is not defined. */ ++ ++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TTL is not defined. */ ++ ++/* HAVE_TCA_FLOWER_KEY_ENC_IP_TTL_MASK is not defined. */ ++ ++#ifndef HAVE_TC_ACT_TUNNEL_KEY ++#define HAVE_TC_ACT_TUNNEL_KEY 1 ++#endif /* HAVE_TC_ACT_TUNNEL_KEY */ ++ ++#ifndef HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT ++#define HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT 1 ++#endif /* HAVE_TCA_TUNNEL_KEY_ENC_DST_PORT */ ++ ++/* HAVE_TCA_TUNNEL_KEY_ENC_TOS is not defined. */ ++ ++/* HAVE_TCA_TUNNEL_KEY_ENC_TTL is not defined. */ ++ ++#ifndef HAVE_TCA_TUNNEL_KEY_NO_CSUM ++#define HAVE_TCA_TUNNEL_KEY_NO_CSUM 1 ++#endif /* HAVE_TCA_TUNNEL_KEY_NO_CSUM */ ++ ++#ifndef HAVE_TC_ACT_PEDIT ++#define HAVE_TC_ACT_PEDIT 1 ++#endif /* HAVE_TC_ACT_PEDIT */ ++ ++#ifndef HAVE_SUPPORTED_40000baseKR4_Full + #define HAVE_SUPPORTED_40000baseKR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_40000baseKR4_Full */ + +-#ifdef SUPPORTED_40000baseCR4_Full ++#ifndef HAVE_SUPPORTED_40000baseCR4_Full + #define HAVE_SUPPORTED_40000baseCR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_40000baseCR4_Full */ + +-#ifdef SUPPORTED_40000baseSR4_Full ++#ifndef HAVE_SUPPORTED_40000baseSR4_Full + #define HAVE_SUPPORTED_40000baseSR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_40000baseSR4_Full */ + +-#ifdef SUPPORTED_40000baseLR4_Full ++#ifndef HAVE_SUPPORTED_40000baseLR4_Full + #define HAVE_SUPPORTED_40000baseLR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_40000baseLR4_Full */ + +-#ifdef SUPPORTED_56000baseKR4_Full ++#ifndef HAVE_SUPPORTED_56000baseKR4_Full + #define HAVE_SUPPORTED_56000baseKR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_56000baseKR4_Full */ + +-#ifdef SUPPORTED_56000baseCR4_Full ++#ifndef HAVE_SUPPORTED_56000baseCR4_Full + #define HAVE_SUPPORTED_56000baseCR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_56000baseCR4_Full */ + +-#ifdef SUPPORTED_56000baseSR4_Full ++#ifndef HAVE_SUPPORTED_56000baseSR4_Full + #define HAVE_SUPPORTED_56000baseSR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_56000baseSR4_Full */ + +-#ifdef SUPPORTED_56000baseLR4_Full ++#ifndef HAVE_SUPPORTED_56000baseLR4_Full + #define HAVE_SUPPORTED_56000baseLR4_Full 1 +-#endif ++#endif /* HAVE_SUPPORTED_56000baseLR4_Full */ + ++#ifndef HAVE_STATIC_ASSERT ++#define HAVE_STATIC_ASSERT 1 ++#endif /* HAVE_STATIC_ASSERT */ + +diff --git a/src/dpdk/drivers/net/tap/rte_eth_tap.c b/src/dpdk/drivers/net/tap/rte_eth_tap.c +index bc889c19..47a2b68f 100644 +--- a/src/dpdk/drivers/net/tap/rte_eth_tap.c ++++ b/src/dpdk/drivers/net/tap/rte_eth_tap.c +@@ -34,8 +34,8 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + #include + #include + +diff --git a/src/dpdk/drivers/net/tap/rte_eth_tap.h b/src/dpdk/drivers/net/tap/rte_eth_tap.h +index 66cd3441..dc3579ac 100644 +--- a/src/dpdk/drivers/net/tap/rte_eth_tap.h ++++ b/src/dpdk/drivers/net/tap/rte_eth_tap.h +@@ -11,7 +11,7 @@ + #include + #include + +-#include ++#include + + #include + #include +diff --git a/src/dpdk/drivers/net/tap/tap_autoconf.h b/src/dpdk/drivers/net/tap/tap_autoconf.h +index dddd4ae6..d5880608 100644 +--- a/src/dpdk/drivers/net/tap/tap_autoconf.h ++++ b/src/dpdk/drivers/net/tap/tap_autoconf.h +@@ -1,14 +1,24 @@ + #ifndef HAVE_TC_FLOWER + #define HAVE_TC_FLOWER 1 +-#endif ++#endif /* HAVE_TC_FLOWER */ + ++#ifndef HAVE_TC_VLAN_ID ++#define HAVE_TC_VLAN_ID 1 ++#endif /* HAVE_TC_VLAN_ID */ + + #ifndef HAVE_TC_BPF + #define HAVE_TC_BPF 1 +-#endif ++#endif /* HAVE_TC_BPF */ + +-#ifndef HAVE_TC_VLAN_ID +-#define HAVE_TC_VLAN_ID 1 
+-#endif
++#ifndef HAVE_TC_BPF_FD
++#define HAVE_TC_BPF_FD 1
++#endif /* HAVE_TC_BPF_FD */
++
++#ifndef HAVE_TC_ACT_BPF
++#define HAVE_TC_ACT_BPF 1
++#endif /* HAVE_TC_ACT_BPF */
+
++#ifndef HAVE_TC_ACT_BPF_FD
++#define HAVE_TC_ACT_BPF_FD 1
++#endif /* HAVE_TC_ACT_BPF_FD */
+
+diff --git a/src/dpdk/drivers/net/tap/tap_netlink.h b/src/dpdk/drivers/net/tap/tap_netlink.h
+index 900ce375..faa73ba1 100644
+--- a/src/dpdk/drivers/net/tap/tap_netlink.h
++++ b/src/dpdk/drivers/net/tap/tap_netlink.h
+@@ -8,8 +8,8 @@
+
+ #include
+ #include
+-#include
+-#include
++#include
++#include
+ #include
+
+ #include
+diff --git a/src/dpdk/drivers/net/tap/tap_tcmsgs.h b/src/dpdk/drivers/net/tap/tap_tcmsgs.h
+index 782de540..8cedea84 100644
+--- a/src/dpdk/drivers/net/tap/tap_tcmsgs.h
++++ b/src/dpdk/drivers/net/tap/tap_tcmsgs.h
+@@ -7,13 +7,13 @@
+ #define _TAP_TCMSGS_H_
+
+ #include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
+ #ifdef HAVE_TC_ACT_BPF
+ #include
+ #endif
+diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
+index 0f66b07a..8c37ea15 100644
+--- a/src/main_dpdk.cpp
++++ b/src/main_dpdk.cpp
+@@ -6969,6 +6969,7 @@ COLD_FUNC bool DpdkTRexPortAttr::update_link_status_nowait(){
+     bool changed = false;
+     rte_eth_link_get_nowait(m_repid, &new_link);
+
++    new_link.link_speed = ETH_SPEED_NUM_50G;
+     if (new_link.link_speed != m_link.link_speed ||
+         new_link.link_duplex != m_link.link_duplex ||
+         new_link.link_autoneg != m_link.link_autoneg ||
diff --git a/fdio.infra.ansible/roles/trex/tasks/deploy_block.yaml b/fdio.infra.ansible/roles/trex/tasks/deploy_block.yaml
new file mode 100644
index 0000000000..5a7890b071
--- /dev/null
+++ b/fdio.infra.ansible/roles/trex/tasks/deploy_block.yaml
@@ -0,0 +1,55 @@
+---
+# file: roles/trex/tasks/deploy_block.yaml
+
+- name: Get Release {{ item }}
+  get_url:
+    url: "{{ trex_url }}/v{{ item }}.tar.gz"
+    dest: "{{ trex_target_dir }}/trex-core-{{ item }}.tar.gz"
+    validate_certs: False
+    mode: 0644
+  register: trex_downloaded
+
+- name: Create Directory {{ item }}
+  file:
+    path: "{{ trex_target_dir }}/trex-core-{{ item }}"
+    state: "directory"
+
+- name: Extract Release {{ item }}
+  unarchive:
+    remote_src: true
+    src: "{{ trex_target_dir }}/trex-core-{{ item }}.tar.gz"
+    dest: "{{ trex_target_dir }}/"
+    creates: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+  register: trex_extracted
+
+- name: Patch Azure
+  patch:
+    src: "files/t-rex.patch"
+    basedir: "{{ trex_target_dir }}/trex-core-{{ item }}"
+    strip: 1
+  when:
+    - azure is defined and item == "2.73"
+
+- name: Compile Release {{ item }} Part I
+  command: "./b configure"
+  args:
+    chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+  when: trex_extracted.changed
+
+- name: Compile Release {{ item }} Part II
+  command: "./b build"
+  args:
+    chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/linux_dpdk/"
+  when: trex_extracted.changed
+
+- name: Compile Release {{ item }} Part III
+  command: "make -j 16"
+  args:
+    chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/scripts/ko/src"
+  when: trex_extracted.changed
+
+- name: Compile Release {{ item }} Part IV
+  command: "make install"
+  args:
+    chdir: "{{ trex_target_dir }}/trex-core-{{ item }}/scripts/ko/src"
+  when: trex_extracted.changed
\ No newline at end of file
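
The loop body above relies on three inputs that are defined in the trex role's variables rather than in this file: trex_url, trex_target_dir, and the trex_version list that main.yaml below feeds into the loop. A minimal sketch of what those variables might look like; the role's defaults are not part of this hunk, so the values here are illustrative only:

    # Hypothetical trex role variables (values illustrative, not from this patch):
    trex_url: "https://github.com/cisco-system-traffic-generator/trex-core/archive"
    trex_target_dir: "/opt"
    trex_version:
      - "2.73"   # the only release the Azure patch task above applies to
      - "2.88"
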
diff --git a/fdio.infra.ansible/roles/trex/tasks/main.yaml b/fdio.infra.ansible/roles/trex/tasks/main.yaml
new file mode 100644
index 0000000000..d43baf909b
--- /dev/null
+++ b/fdio.infra.ansible/roles/trex/tasks/main.yaml
@@ -0,0 +1,24 @@
+---
+# file: roles/trex/tasks/main.yaml
+
+- name: Inst - Update Package Cache (APT)
+  apt:
+    update_cache: yes
+    cache_valid_time: 3600
+  when:
+    - ansible_distribution|lower == 'ubuntu'
+  tags:
+    - trex-inst-prerequisites
+
+- name: Inst - Prerequisites
+  package:
+    name: "{{ packages | flatten(levels=1) }}"
+    state: latest
+  tags:
+    - trex-inst-prerequisites
+
+- name: Inst - Multiple T-Rex Versions
+  include_tasks: deploy_block.yaml
+  loop: "{{ trex_version }}"
+  tags:
+    - trex-inst
\ No newline at end of file
diff --git a/fdio.infra.ansible/roles/user_add/defaults/main.yaml b/fdio.infra.ansible/roles/user_add/defaults/main.yaml
new file mode 100644
index 0000000000..56f5098f12
--- /dev/null
+++ b/fdio.infra.ansible/roles/user_add/defaults/main.yaml
@@ -0,0 +1,11 @@
+---
+# file: roles/user_add/defaults/main.yaml
+
+# Default shell for a user if none is specified.
+users_shell: /bin/bash
+
+# Create home directories for new users by default.
+users_create_homedirs: true
+
+# Default list of users to create.
+users: []
diff --git a/fdio.infra.ansible/roles/user_add/handlers/main.yaml b/fdio.infra.ansible/roles/user_add/handlers/main.yaml
new file mode 100644
index 0000000000..960f573b48
--- /dev/null
+++ b/fdio.infra.ansible/roles/user_add/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+# file: roles/user_add/handlers/main.yaml
+
+- name: Restart SSHd
+  service:
+    name: sshd
+    state: restarted
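
The user_add defaults above ship an empty users list, and the tasks file below iterates it with with_items and with_subelements. A hypothetical inventory override showing the entry shape those loops expect; every value here is illustrative, not taken from this patch:

    users:
      - username: "testuser"
        groups: ["adm", "sudo"]
        password: "$6$..."        # pre-hashed, as the user module expects
        ssh_key:
          - "ssh-rsa AAAA... testuser@example"
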
}}" + shell: "{{ item.shell if item.shell is defined else users_shell }}" + state: present + with_items: "{{ users }}" + tags: + - user-add-conf + +- name: Conf - SSH keys + authorized_key: + user: "{{ item.0.username }}" + key: "{{ item.1 }}" + with_subelements: + - "{{ users }}" + - ssh_key + - skip_missing: yes + tags: + - user-add-conf + +- name: Conf - Allow Password Login + lineinfile: + dest: "/etc/ssh/sshd_config" + regexp: "^PasswordAuthentication no" + line: "PasswordAuthentication yes" + notify: + - "Restart SSHd" + tags: + - user-add-conf + +- name: Conf - Add Visudo Entry + lineinfile: + dest: "/etc/sudoers" + state: present + line: "{{ item.username }} ALL=(ALL) NOPASSWD: ALL" + validate: "visudo -cf %s" + with_items: "{{ users }}" + tags: + - user-add-conf + diff --git a/fdio.infra.ansible/roles/vpp/defaults/main.yaml b/fdio.infra.ansible/roles/vpp/defaults/main.yaml new file mode 100644 index 0000000000..7fac499307 --- /dev/null +++ b/fdio.infra.ansible/roles/vpp/defaults/main.yaml @@ -0,0 +1,36 @@ +--- +# file: roles/vpp/defaults/main.yaml + +packages: "{{ packages_base + packages_by_distro[ansible_distribution|lower][ansible_distribution_release] + packages_by_arch[ansible_machine] }}" + +packages_base: + - "gdb" + - "libtool" + - "lxc" + - "pkg-config" + - "screen" + +packages_by_distro: + ubuntu: + bionic: + - "build-essential" + - "libglib2.0-dev" + - "libmbedcrypto1" + - "libmbedtls10" + - "libmbedx509-0" + - "libnuma-dev" + - "libpixman-1-dev" + focal: + - "build-essential" + - "libglib2.0-dev" + - "libmbedcrypto3" + - "libmbedtls12" + - "libmbedx509-0" + - "libnuma-dev" + - "libpixman-1-dev" + +packages_by_arch: + aarch64: + - [] + x86_64: + - [] diff --git a/fdio.infra.ansible/roles/vpp/tasks/main.yaml b/fdio.infra.ansible/roles/vpp/tasks/main.yaml new file mode 100644 index 0000000000..ef03011b51 --- /dev/null +++ b/fdio.infra.ansible/roles/vpp/tasks/main.yaml @@ -0,0 +1,27 @@ +--- +# file: roles/vpp/tasks/main.yaml + +- name: Inst - Update Package Cache (APT) + apt: + update_cache: yes + cache_valid_time: 3600 + when: + - ansible_distribution|lower == 'ubuntu' + tags: + - vpp-inst-prerequisites + +- name: Inst - Prerequisites + package: + name: "{{ packages | flatten(levels=1) }}" + state: latest + tags: + - vpp-inst-prerequisites + +- name: Conf - sysctl + file: + src: "/dev/null" + dest: "/etc/sysctl.d/80-vpp.conf" + state: "link" + become: yes + tags: + - vpp-conf-sysctl diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh new file mode 100644 index 0000000000..d0fc772037 --- /dev/null +++ b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +# Copyright (c) 2021 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add Intel Corporation Ethernet Controller 10G X550T to blacklist. 
diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh
new file mode 100644
index 0000000000..d0fc772037
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-default.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add Intel Corporation Ethernet Controller 10G X550T to blacklist.
+PCI_BLACKLIST=($(lspci -Dmmd ':1563:0200' | cut -f1 -d' '))
+
+# Add Intel Corporation Ethernet Controller X710 for 10GbE SFP+ to whitelist.
+PCI_WHITELIST=($(lspci -Dmmd ':1572:0200' | cut -f1 -d' '))
+
+# See http://pci-ids.ucw.cz/v2.2/pci.ids for more info.
diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-tx2.sh b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-tx2.sh
new file mode 100644
index 0000000000..6c56752ad0
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-tx2.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021 PANTHEON.tech and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add QLogic Corp. FastLinQ QL41000 Series 10/25/40/50GbE Controller to
+# blacklist.
+PCI_BLACKLIST=($(lspci -Dmmd ':8070:0200' | cut -f1 -d' '))
+# Add I350 Gigabit Network Connection 1521 to blacklist.
+PCI_BLACKLIST+=($(lspci -Dmmd ':1521:0200' | cut -f1 -d' '))
+# Add MT27800 Family [ConnectX-5] 1017 to blacklist.
+PCI_BLACKLIST+=($(lspci -Dmmd ':1017:0200' | cut -f1 -d' '))
+
+# Add Intel Corporation Ethernet Controller XL710 for 40GbE QSFP+ to whitelist.
+PCI_WHITELIST=($(lspci -Dmmd ':1583:0200' | cut -f1 -d' '))
+
+# See http://pci-ids.ucw.cz/v2.2/pci.ids for more info.
+
+declare -A PF_INDICES
+# Intel NICs
+PF_INDICES["0000:05:00.0"]=0
+PF_INDICES["0000:05:00.1"]=1
+PF_INDICES["0000:91:00.0"]=0
+PF_INDICES["0000:91:00.1"]=1
diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.service b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.service
new file mode 100644
index 0000000000..996792ab9b
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=CSIT Initialize SR-IOV VFs
+After=network.target
+
+[Service]
+Type=oneshot
+RemainAfterExit=True
+ExecStart=/usr/local/bin/csit-initialize-vfs.sh start
+ExecStop=/usr/local/bin/csit-initialize-vfs.sh stop
+
+[Install]
+WantedBy=default.target
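
Because the unit above is Type=oneshot with RemainAfterExit=True, systemd keeps it "active" after ExecStart completes, and ExecStop (which passes stop to the script, writing 0 to each whitelisted PF's sriov_numvfs) runs only on an explicit stop. A hypothetical ad-hoc teardown task, for illustration only:

    - name: Tear Down SR-IOV VFs (illustrative only)
      systemd:
        name: "csit-initialize-vfs.service"
        state: stopped
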
diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.sh b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.sh
new file mode 100644
index 0000000000..393e997d65
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# CSIT SRIOV VF initialization and isolation.
+
+set -euo pipefail
+
+SCRIPT_DIR="$(dirname $(readlink -e "${BASH_SOURCE[0]}"))"
+source "${SCRIPT_DIR}/csit-initialize-vfs-data.sh"
+
+# Initialize whitelisted NICs with maximum number of VFs.
+pci_idx=0
+for pci_addr in ${PCI_WHITELIST[@]}; do
+    if ! [[ ${PCI_BLACKLIST[*]} =~ "${pci_addr}" ]]; then
+        pci_path="/sys/bus/pci/devices/${pci_addr}"
+        # SR-IOV initialization
+        case "${1:-start}" in
+            "start" )
+                sriov_totalvfs=$(< "${pci_path}"/sriov_totalvfs)
+                ;;
+            "stop" )
+                sriov_totalvfs=0
+                ;;
+        esac
+        echo ${sriov_totalvfs} > "${pci_path}"/sriov_numvfs
+        # SR-IOV 802.1Q isolation
+        case "${1:-start}" in
+            "start" )
+                pf=$(basename "${pci_path}"/net/*)
+                for vf in $(seq "${sriov_totalvfs}"); do
+                    # PCI address index in array (pairing siblings).
+                    if [[ -n ${PF_INDICES[@]} ]]
+                    then
+                        vlan_pf_idx=${PF_INDICES[$pci_addr]}
+                    else
+                        vlan_pf_idx=$(( pci_idx % (${#PCI_WHITELIST[@]} / 2) ))
+                    fi
+                    # 802.1Q base offset.
+                    vlan_bs_off=1100
+                    # 802.1Q PF PCI address offset.
+                    vlan_pf_off=$(( vlan_pf_idx * 100 + vlan_bs_off ))
+                    # 802.1Q VF PCI address offset.
+                    vlan_vf_off=$(( vlan_pf_off + vf - 1 ))
+                    # VLAN string.
+                    vlan_str="vlan ${vlan_vf_off}"
+                    # MAC string.
+                    mac5="$(printf '%x' ${pci_idx})"
+                    mac6="$(printf '%x' $(( vf - 1 )))"
+                    mac_str="mac ba:dc:0f:fe:${mac5}:${mac6}"
+                    # Set 802.1Q VLAN id and MAC address
+                    ip link set ${pf} vf $(( vf - 1 )) ${mac_str} ${vlan_str}
+                    ip link set ${pf} vf $(( vf - 1 )) trust on
+                    ip link set ${pf} vf $(( vf - 1 )) spoof off
+                done
+                pci_idx=$(( pci_idx + 1 ))
+                ;;
+        esac
+        rmmod i40evf
+        modprobe i40evf
+    fi
+done
diff --git a/fdio.infra.ansible/roles/vpp_device/handlers/main.yaml b/fdio.infra.ansible/roles/vpp_device/handlers/main.yaml
new file mode 100644
index 0000000000..ee9d368638
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/handlers/main.yaml
@@ -0,0 +1,21 @@
+---
+# file: roles/vpp_device/handlers/main.yaml
+
+- name: Start csit-initialize-vfs.service
+  systemd:
+    enabled: yes
+    state: started
+    name: csit-initialize-vfs.service
+  tags:
+    - start-vf-service
+
+- name: Update GRUB
+  command: update-grub
+  tags:
+    - update-grub
+
+- name: Reboot server
+  reboot:
+    reboot_timeout: 3600
+  tags:
+    - reboot-server
diff --git a/fdio.infra.ansible/roles/vpp_device/tasks/main.yaml b/fdio.infra.ansible/roles/vpp_device/tasks/main.yaml
new file mode 100644
index 0000000000..33b551715f
--- /dev/null
+++ b/fdio.infra.ansible/roles/vpp_device/tasks/main.yaml
@@ -0,0 +1,92 @@
+---
+# file: roles/vpp_device/tasks/main.yaml
+
+- name: VPP_device - Load Kernel Modules By Default
+  lineinfile:
+    path: "/etc/modules"
+    state: "present"
+    line: "{{ item }}"
+  with_items:
+    - "vfio-pci"
+  tags:
+    - load-kernel-modules
+
+- name: VPP_device - Disable IPv6 Router Advertisement (default)
+  sysctl:
+    name: "net.ipv6.conf.default.accept_ra"
+    value: "0"
+    state: "present"
+    sysctl_file: "/etc/sysctl.d/90-csit.conf"
+    reload: "yes"
+  tags:
+    - set-sysctl
+
+- name: VPP_device - Disable IPv6 Router Advertisement (all)
+  sysctl:
+    name: "net.ipv6.conf.all.accept_ra"
+    value: "0"
+    state: "present"
+    sysctl_file: "/etc/sysctl.d/90-csit.conf"
+    reload: "yes"
+  tags:
+    - set-sysctl
+
+- name: VPP_device - Copy csit-initialize-vfs.sh
+  copy:
+    src: "files/csit-initialize-vfs.sh"
+    dest: "/usr/local/bin/"
+    owner: "root"
+    group: "root"
+    mode: "744"
+  tags:
+    - copy-vf-script
+
+- name: VPP_device - Copy csit-initialize-vfs-data.sh
+  copy:
+    src: "files/{{ vfs_data_file }}"
+    dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
+    owner: "root"
+    group: "root"
+    mode: "744"
+  tags:
+    - copy-vf-data-script
+  when:
+    - vfs_data_file is defined
+
+- name: VPP_device - Copy default csit-initialize-vfs-data.sh
+  copy:
+    src: "files/csit-initialize-vfs-default.sh"
+    dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
+    owner: "root"
+    group: "root"
+    mode: "744"
+  tags:
+    - copy-vf-data-script
+  when:
+    - vfs_data_file is not defined
+
+- name: VPP_device - Start csit-initialize-vfs.service
+  copy:
+    src: "files/csit-initialize-vfs.service"
+    dest: "/etc/systemd/system/"
+    owner: "root"
+    group: "root"
+    mode: "644"
+  notify:
+    - "Start csit-initialize-vfs.service"
+  tags:
+    - start-vf-service
+
+- meta: flush_handlers
+
+- name: VPP_device - Set hugepages in GRUB
+  lineinfile:
+    path: "/etc/default/grub"
+    state: "present"
+    regexp: "^GRUB_CMDLINE_LINUX="
+    line: "GRUB_CMDLINE_LINUX=\"{% for key, value in grub.items() %}{% if value %}{{key}}={{value}} {% else %}{{key}} {% endif %}{% endfor %}\""
+  notify:
+    - "Update GRUB"
+    - "Reboot server"
+  tags:
+    - set-grub
+
+- meta: flush_handlers
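
The Set hugepages in GRUB task above renders GRUB_CMDLINE_LINUX from a host-supplied grub dict: truthy values emit key=value, falsy values emit the bare key. A hypothetical host_vars shape showing both cases; keys and values are illustrative only, the real ones live in the inventory host_vars:

    grub:
      hugepagesz: "2M"      # rendered as hugepagesz=2M
      hugepages: 32768      # rendered as hugepages=32768
      isolcpus: "1-27"      # rendered as isolcpus=1-27
      nosoftlockup: ""      # falsy value, rendered as the bare flag nosoftlockup
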
diff --git a/fdio.infra.ansible/site.yaml b/fdio.infra.ansible/site.yaml
new file mode 100644
index 0000000000..4436c21b18
--- /dev/null
+++ b/fdio.infra.ansible/site.yaml
@@ -0,0 +1,26 @@
+---
+# file: site.yaml
+
+- import_playbook: tg.yaml
+  tags:
+    - tg
+    - tg_aws
+    - tg_azure
+
+- import_playbook: sut.yaml
+  tags:
+    - sut
+    - sut_aws
+    - sut_azure
+
+- import_playbook: vpp_device.yaml
+  tags:
+    - vpp-device
+
+- import_playbook: nomad.yaml
+  tags:
+    - nomad
+
+- import_playbook: dev.yaml
+  tags:
+    - dev
diff --git a/fdio.infra.ansible/sut.yaml b/fdio.infra.ansible/sut.yaml
new file mode 100644
index 0000000000..2a413d7924
--- /dev/null
+++ b/fdio.infra.ansible/sut.yaml
@@ -0,0 +1,103 @@
+---
+# file: sut.yaml
+
+- hosts: sut
+  remote_user: testuser
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: baremetal
+      tags: baremetal
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: kernel
+      tags: kernel
+    - role: mellanox
+      tags: mellanox
+    - role: docker
+      tags: docker
+    - role: vpp
+      tags: vpp
+    - role: dpdk
+      tags: dpdk
+    - role: kernel_vm
+      tags: kernel_vm
+    - role: csit_sut_image
+      tags: csit_sut_image
+    - role: performance_tuning
+      tags: performance_tuning
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
+
+- hosts: sut_aws
+  remote_user: testuser
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: vpp
+      tags: vpp
+    - role: dpdk
+      tags: dpdk
+    - role: iperf
+      tags: iperf
+    - role: docker
+      tags: docker
+    - role: aws
+      tags: aws
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
+
+- hosts: sut_azure
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: docker
+      tags: docker
+    - role: vpp
+      tags: vpp
+    - role: iperf
+      tags: iperf
+    - role: dpdk
+      tags: dpdk
+    - role: azure
+      tags: azure
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
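
site.yaml above stitches the per-function playbooks together, and each play keys off an inventory group (tg, sut, vpp_device, plus the _aws and _azure variants). A minimal YAML-inventory sketch of that mapping, for orientation only; the repository's real inventory lives in inventories/lf_inventory/hosts, and the group memberships shown here are illustrative:

    all:
      children:
        tg:
          hosts:
            10.30.51.16:
        sut:
          hosts:
            10.30.51.17:
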
diff --git a/fdio.infra.ansible/tg.yaml b/fdio.infra.ansible/tg.yaml
new file mode 100644
index 0000000000..01d2cbc0ca
--- /dev/null
+++ b/fdio.infra.ansible/tg.yaml
@@ -0,0 +1,111 @@
+---
+# file: tg.yaml
+
+- hosts: tg
+  remote_user: testuser
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: baremetal
+      tags: baremetal
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: kernel
+      tags: kernel
+    - role: mellanox
+      tags: mellanox
+    - role: docker
+      tags: docker
+    - role: iperf
+      tags: iperf
+    - role: trex
+      tags: trex
+    - role: ab
+      tags: ab
+    - role: tg
+      tags: tg
+    - role: csit_sut_image
+      tags: csit_sut_image
+    - role: performance_tuning
+      tags: performance_tuning
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
+
+- hosts: tg_aws
+  remote_user: testuser
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: dpdk
+      tags: dpdk
+    - role: docker
+      tags: docker
+    - role: tg
+      tags: tg
+    - role: iperf
+      tags: iperf
+    - role: trex
+      tags: trex
+    - role: ab
+      tags: ab
+    - role: aws
+      tags: aws
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
+
+- hosts: tg_azure
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: python_env
+      tags: python_env
+    - role: docker
+      tags: docker
+    - role: tg
+      tags: tg
+    - role: iperf
+      tags: iperf
+    - role: trex
+      tags: trex
+    - role: ab
+      tags: ab
+    - role: azure
+      tags: azure
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
\ No newline at end of file
diff --git a/fdio.infra.ansible/vault.yml b/fdio.infra.ansible/vault.yml
new file mode 100644
index 0000000000..2c10624d07
--- /dev/null
+++ b/fdio.infra.ansible/vault.yml
@@ -0,0 +1,706 @@
+$ANSIBLE_VAULT;1.1;AES256
+39626435383866646264643632343836383132616162323536323732396265373539616464373932
+6237636263373339366163363739613139383330626437310a343862363835633663633164393464
+34653061656331363864323363663932306637613937383534343963316364636635313663343664
+3539373536313461350a626264363866306630656464646439303338383535323863393537346262
+61346533323162653766343763363934313937653237663437393133666463363962346331343836
+33393533313737626636356466623566393232303037636266353565653130646434323338376136
+32306537653134633062313732623830393166336135666263343933323138616436313632623533
+38646539623837626132346631393964663062386631313236353563393131376130316666343562
+35346632316461336564393264323632643232663136323334336336626339633365643565306436
+35333365356637316631666661356431396635636431383032643566373666363936363462616337
+62316234363963306338366638663064396365373264326635376134356333626130303834383534
+63356363373035623031666330626463613563313066303365303430643262346561633633666466
+63666661616139366431313832376665383638393835376261326465353938333963333264653236
+35613664656363643438316232306630353361343030353963386666656166373061666236326664
+63383033633733633361383061316232306566353062616163623563663032393734353963303361
+34323330336561323734373430323435393731323931343335613935396330663533323137323234
+39333132326364646336613362363365323331366430396535383138386261356534613832393863
+61363133356435303964353637346436613633366336386465326336313935323538643636313331
+39663832373337643865386164343764613764346331633261653039613136386636366431376565 +36373431303631323730343831363837303665353461303130326430636161623431613730356134 +66396632306139373732636165313834366339366363666566306461366266356338346566316363 +38386564313665306239646436386635396664643333383736396131353535336334383765653961 +31643833663361343036623634326662333935343332333066353732396132373561396631633265 +37643733393433336364353630666136616235656638343032333632373366616634343130313536 +64643834643236363731353664396636613266656263363664376539306539346639313838663738 +36623634396638313861636438306136613433313062306537653738316563613631363566623134 +62323161646464636131326661646535313436396534356335366564373165653662613536663561 +32366666356339326264326539393239666234633663366261623933366636636161343233383932 +34336638363864346436613164613161326363316431303963656233306663376332373731623437 +66353636336462663730316161313236393264633633363965653433306639643938383933373062 +30323331323735613139323963643335373335633265383235343835376363666132643431366665 +32396465313434343534343433633432396131323635356265663939616436343137393561633734 +36383961363964306431343939313837663166376133616365663939633161326338306561356666 +34663938613865383739376534306333653135383431316464613432366566623362393065306635 +64313737363065336234393463316138313864386163633966336662313366396431353632363365 +62313036656434616136626331333139343235333930363166313833306438616364393065333963 +36396137656634393066646330613365316562306164326133663365313938333337386137383864 +61376333346336396537613933323261663536353161643431383636316235393035313861326230 +31623566363138663866326231376561336534366435626134306463323032613630646361343165 +38663662653334316139313866346163623461656335616232353636636431636339313239346661 +33396133306132643732366232353166613263356165663866626465343936326461626336376462 +34376431643730616461376261356633356166313131623837343839323935343531356263336230 +62323236633339303031316165326362306535653064363862663330393034303964353437363565 +61616565656164313664306664663932333261333162383832326666393366353730306531343639 +64323530333466653534306130636161383463376632616232353361656661353831643966633639 +66336631396164663864653933623662343061316539306634393334653630393462376636346165 +33646635646633353263333838343534373838333836656536373639646636303033626537313464 +38323462396231653233333463313263313233353932356633366630623138376432383566363538 +38346336343765353830656139333564633533323538616166333865323439343361386135343130 +63343363303332313834313764356637643230386536613338623937393436613732383833366235 +64343862663234633232656333666438633464663463663737363431626361643532396664326661 +33383261643231313836326162373532626234383362653162656435366535643863316235656536 +62306334316566623732303834313864663636636334663130653230383365623031316164373161 +64326638636339303065386435303332333230666634323163393134613032326335313961306236 +37343666376235653637636136636133373966636136626137356331336234646366316364623134 +37633832386237346266376533663839343938623664653030636265303465643835666234383763 +36373439346566623739343361386562623962633539383134663237656662653939313938663938 +30653438306666613731326462633334313730363763666538666538356638633261353363393930 +37626261316565633162366636343539626238323861656565393162333662353563393139626261 +64663436336233636438326633303164333939633435373765333731666135366637653532396238 +61346538626662626463303965363061646261343232356639336366333065623765646335353638 
+66616430343562366262333235323234383334343538633037643661623661306330343839333034 +62626533353238323064633862613736663666303666373262633533643539643962383333346264 +38346134363236633031393138313538666533643561373766613836636461393166306138333231 +63613735313636376538346230303662646633346363353535333232663033373036336564623766 +62316464363930366665663530626564346130316231313033323130323531396366303030616363 +36356431653435346561656132616632373834393833633865323762343037656439373331623664 +39633635306334646334386635356362346431366430326364303931336562333735613033346437 +37613636656261383034663633666138653537326364326430666632323633386636376635343232 +37643965663564376337616432623336326238626530333736313530386537383633653437356436 +66613230366666373734396538363362626531666331656362356364393230343132363033333061 +39396237316432663832343261393366306232363166313034383033313565373265353436666263 +38356564656331393733393333346164313233313937343330363662316431353233633063646438 +63386162363034363039663864636365666638343665386334383436353336313333653166343937 +35626138326333623362623639336461343737633730306630303963613465396464386534663763 +63383733643761613136386430646361363731343433656533653938366138373162313335373038 +63626639643634663962643130343566323730646432366136393335623134336238383761333733 +31633330343962316134333536376464663166316365386265393061316162363161376333393561 +31646361386332613837663634303134626133376630643730636466643961343462366566363737 +65373236323861343238343434316637663333346161633835306663313537383631303136323832 +30353862663462386262663461623432373034333631633133633563636336343831653934303033 +36303162323164663632393937643665643961313464623033383130663565333034636164643136 +62316639333662316133393533623561633435653036376637323739326339346632653837633637 +37663332386663396363653237353031343065366334333836343830363562383733653661323664 +61333633306531323639343931663061383031303937323261616165633163636133323130666133 +31353366386530323465646133343766663966663561616337313330353436303234346136326365 +64343561656532366538336662656261396337623763343966366136646638643739373037336361 +65336538373161396533613262326633343233653036656364636536636338623930316365353365 +61663435663333633435376535353963646566303130616364396539643366306635646538363464 +36623433663437363761313038666665306536353332633361316339666263306665636566393531 +66356565363839666130663866643134343632623837323630376638313131383036376464613331 +65366262663936623238303863303761313466656538343962326662646139363166616230323938 +35303265343132643938616330343833386261666432376666356532363933656461396633343831 +66623735623233363833656137626161643265633335396162626261633334613232363435613266 +39356663333532306662353538396565663666393063313630663133646365626461376363633735 +66343030306232396332313731313531333235636363383130373532633137383536316639626537 +33306330393737343563626234633531643334313363313466326564343530383337316238383362 +31306430313637613639356365343034393766313362613430646566643131646530313861383037 +31353636323963646236343866383166646633613539386333316164316532653036663236663039 +36306364613033616338643366303264343462393532393638316235303565333634396537353334 +36666361646237356430396636663032666264346561316533343438303238313537326263333766 +64313231303563626364663232363462643735346639373463376464383466636638663036353437 +64356338653862393637366233613366393635623637663332613961623733653362353234656130 +39383533626133313066646331343361303261376364356463383261356462333138643636306432 
+34326135333137366339366561646337323134633033333365376666313763666630306261666263 +38386530356361633062663964333631393665343539366532316161616331383831396537323539 +66343138373463343434356465316662356233633861396337343762396366663361373161363838 +30363633313634623333383135623738343738396230343839316137633663396635643865373735 +64306238393833353065333731626131666134303264346662363433316161626632626430363861 +36656263313032643132383162386439613763333033396434613631656233653034353263386138 +35396465316664646339333630666432353064646461323463633237616338316362626538323234 +34343531633766396662306234356361646432393663303764323633333831636631323938396261 +65623064303234353861363261346139646633363965316337363962643430633864633061663462 +33393531613032386134303765373132306662633161383133376231636631643464393536636430 +32373238653166313231333733313865663862653537313736353338336634653032326465616565 +35626536386132363139303939623864613463633432643538383036616530323765376435643237 +65656463306533336661326462386636326630346664333838636332376530333432393235666332 +35663836313765363934313236333765386130623064653463323039666536646531646333633334 +32356439303333353536663638333138343965653663623465653938326565353535666662376364 +65613933323566313363366266343630396331366165306164663065343361666366616237316134 +31323236316163663030666265356531363266343337623561616163386336386266626634346436 +30303738626335313935303463363166313438663430363035336466346164373738306565653133 +30313134303563613835633739303137626562646533636263623336396233626266353130633262 +30396637613934616636643333316539653431323563616338343637303139373334386435626333 +37616265663435303130376137346361653463313934323837313331653662623261613962663833 +64386165343661663463643136643736333232346231396666383239313830386132356238343233 +65376535356238653663613336343632646234663633373238316363663162633535623962376135 +63396661373130383331623733343664653434336264343763303266303339626434316331336433 +32313066626262313438373631303337663232636538643863613939353266666463313733653830 +38666334663864373830313762346339653230393036653336663030346631333365613062633361 +36616538636336613930303065663637373433643937356461373030613733333434663736323864 +33376365323235333938353261653766663134313733393666623038646362666239373338336464 +38633433383938623463366335373439336331386336373631353232663662616261383831663339 +33663830653737306364633935393334306466383634326430326361613461316639393136373065 +34663234613434376430613365316438306162396239326531306638303132666266616238336538 +65393965646139383263366263376338396238393730643231616664323432653535613833363465 +38626166356463333938626131343638333635643661343162663931303937653835646364306164 +63663232653234383465353464616364656336653438313664313339313231633739303930316536 +32633037643561626362633231626634306335376333616562663039326466353165313962343434 +62613638313439663032366161666665663562623535643037646331323934616562623432323565 +35383939393134393931383030326237336535333862633637373730336339636239366439643863 +31626364306365303063346265343465623735346639386239616262393931643835393562326535 +62376536393965613035626664613034393533363961353864626666373639386133303634363034 +35323939323465366638386631656561636564303430643533323864333734386531376337353632 +38323161333865333663363239376133323063643931313464653464316161633637353165346233 +65373431663462656365326136346130663830313566313038303265366233626261356535633731 +65353666393935616634366335306363383831663462623034303434646431666138636231373336 
+64366137336231376562313762343136646136353233326139346131336238343262623738336231 +35353031666632393237303834633536636161636561643463346339343164623732313537353433 +33343439373736336330383336303038393831386130323438633865623765623734316365313333 +33303265373562663734353938343762633230366564336137303134383661393638626465326561 +32633862393539316633616165666262623563333966353736363536656536343235303366633964 +61623434386335356435386464363035663162333761336361313263373138343965613538376435 +62646266346162363334363963653937353164653563343834346162323166386335383565353434 +30396434343465303538616463663532343938636135313739366632656332333733613737343536 +33323336356533663962386237333566323030306133666334366365373535333265333132353437 +35626435356234613539336232613335653662303365376163626535343163343036663631323031 +62393263396233316332373663613435356465616265363336393732666235373466306632383635 +64613962633435303834343231616264326666316366653433313232323835626238656439306361 +38326435363066366261363931333336613164353166353236373834316437303735623931613536 +31666335303330313664333566363063313337653163396531353137613562643039336338656164 +66663763313432386638323766323032386135643666313765313132393632323035643538313736 +38656439626261663636393034366466363437326561396261303636636265616237633537316139 +34353165303564343039656631313130316535336361393166656439623538313563343033373366 +63643862386464353330643537326331393133323765353436313136313435633261626534303736 +31346435343865396665663333323737333635343335346134343061613033393061343532623131 +66633864333838363937626135336231366434363634376635633566363536353461313866373836 +35356463313437313134323339366531633439353961346164626663613232656635666237363461 +62633832356433313934613763663433396666396435653433303733346639613762303534613939 +38343338663162346637633231626435333963376262353661313436623233616534373333313631 +34336433313964326339633466616533303031343935336266326663343937366331656539623535 +66383261303462346634626261363737386232633231616239633335303238383766643665663639 +35333731666432346665663736313435356231343132376532616439653963393934633037363366 +37363632333362333438646339336462396665373361616536306466306663623733626238646135 +37663863343339376162373065393130616538313939376137386566353361623937666330303538 +64653531653634636133353433353463323738316664666636643933646661353339326238316666 +64316137663236396234396130646533316438366337633437326539363130343765656639363334 +30663362393937393863633262323931336336343362646264656331343733386332356237653134 +62383966636130636231333539336465303838343365383135356464336139656637623233353236 +31643731326430313563613130626431353562383036373461353663363031663232353366393765 +64636261376666346132373161653430633934366165656139323232363463646461346336613964 +66313130636132323931316438613137356162313062343431393035303339306237343461326336 +38313261653231633836623739306438616439613730303332353434313934356431396565316238 +32306132646239336466663232386266656134633563646431613332393062633439646562303665 +65306665373532376362653737343061383036323535333330373831393635333663376237346565 +34646666633864363836626666623838333263633036656337656431616635353662613262336336 +62643965666364633865666432326137653864366435323332623536303465386661363162653064 +65393362336362313438366536613038316461376139376662356437343631636239633066306162 +38373638363233643531366338313434373034656635373731666230623663633430656266343933 +35663030316335646532393766306638346365396533326433646530313630636239356231646337 
+62336163633162613862356138613863653432303064356638656135646264306363323664336263 +62323963333432376237626134323062626563623165386335313533356366333437353838363363 +64626230353262313138373535306538313765323435363732396437616134643931323232316463 +63396161653032653837613366346138366166313730656665623563333834653836656162393466 +65666436363465393934383732393963616236616263393366623130646134623730376462346532 +37373662306262366331323539323365356232323739333466383865653461313635356339633338 +61666330666338363533313462613739393863666439623033376336396364666365303432643137 +32663365376231333230663665326536333638303234663365303935626331626665663239323166 +64303966653566616630366432336264333639646236613038383134363336376363353961366435 +34313133346339306133303839313631663831353465376664656138616131653437626337343539 +33623834626263633939366238373232323165653236616361346533316463613063353064383633 +31366534303736316532306563366663363035636462323737633436326138306464313134323066 +33626538616536333338636664666334353832613135333832303862313061343363303362343461 +39316438396564613662356432346363336266356239303632323664393864343233633664636261 +35323831623133363937343639656637663133383637386635383137343165353932333665316434 +34393839633233396237643062623536393231373864396236613162323364663732666265633632 +32666134366137363463656131376630353130363538356334313239323939663263613863333161 +61386434306532313263326334623362623465336461326564313735333732333539653162343165 +34363861643532633137356466336430323961666133613031626631636631373464633832313035 +33343935643531383339326330303538333133666265313230373734303562366336363863353936 +33386566656633626239356334323765323435663233363265666231616363343537363438633766 +33613336366163653134333039333535303962366162613337356166393335366131643163316339 +31383132663331363762633662366462343663393062653866663766623830376135616263393839 +66326638316339633435306438663534356662336232303032376330383065336432623266383739 +64303866393235333561633434313763366333626631346563323662326537653339353834363961 +38393236333462343234663761336562383564613530323762626465643933386239396266313139 +31363138383138383236363464346163633166393232626634376533373862633039373964303331 +34323132636535663732646463656632333039333063356533663263633035666664343165623061 +64653637333637363632366562326431323932303233343433356431396564626632353334656631 +37623866633330313337653466353139393831373531656333313932376232363961663630353265 +31323864653664643030323531393736336165616632343766653032376665666263616438363064 +65646630613136343033343466643765656236393061613238356435383264383866363632333332 +61616437623065636135656330376262376130316336643263373362666336343331303534353965 +61316263633534396435373336363638326536363366383165353565373736353032633632646234 +31623233663333303232643163646630366432303933393131613461306264303932663038343435 +64376533363134343338643364626135386334316337373636653735373330386631336362623030 +32633562376164316637373639333230643065336436363163353263393630623065656263306231 +38366235366335326563323733303539383330383630613937326331303731396361653537373436 +62366333336131626138393839343463626436613664653137393731653332646661333136666232 +39383433643331383736356137663830396265336264316336393731343461356239393534313432 +37303838366264623034373538316234333536646436643661343363393161656633313530373465 +65313361656637623432393233326332373537383632316361333735663935363835646634396431 +36323335356637333530353366363661323762393131623737656238623036353937393730636333 
+30613337386564383632333063396430306166396333346533333834343538356130353436323765 +38363664633330323163316233633864363262343732306363656237303534656466323231383130 +34636264343735316362643961386639353733336135613261303736313537333165323739343032 +65353363363161616531633462303539363734646661366464383334613734313137623731323939 +63323065366132623362333335663735306361393630326538613037383632663830356431373261 +33393063626436663933353864346264633535336532643136666363663237353030663761653933 +39623331366663373363356333373263643338653336343463333032646630323132636265333130 +64623765336336613161626336313361643265633735636436363037303432643265316465386335 +63316331363836626433393165356131396461333931646665613363313737613337333638306432 +33623063323065313865323732353362363333333331373766376465613638656465653035326531 +32366333363935666230666334653962623835303435336466663032623531616361643339666130 +31376265656165303864316239356339396665616461626362333862616364633432363135373332 +39313430303363623433623161643432643261613364393630633334303431366235393765653730 +38323639306161633434666161653564393436353031363131626336643664613232313463636431 +35633935393666376462656639363431333031363534383064333934663265396133373433616363 +39366461333034643631356264646362373439386161346337366165633639613939373930353562 +33643634646331376433373530623438373734353661623766623263616232376365656335636534 +35626665393636643830613466393061666338323464333230336666366135633661316537643764 +30653130356232646430653365303966303266336366373132636237623332363133323632646230 +33626361353464306566393438616465326266386262633566646134613166396635343733326635 +37363266386363633030356531643166333530313561303638346436376235623033363834316266 +36323363303636383334653533346335653939386237646436336437313161653932656331343064 +32646161623563303462386433356334306239646332383137613162363237613062323265616230 +38636334333762373138393833323231613062616535356664376239356433313264336163313138 +36353432613033333237353531656162616363353835376138666131306361623365323237353333 +64316364383866356338656237313539303434383064323831333832633063653264666261333032 +31336135613032366666613566303539626163313364303662356165643931353938356663636366 +38303063323337323661333464336339653833316163643235666133336438303930393766623931 +35386331323430333630616131663831653265396165323430333866626263356138323161303763 +37373233386562326462613364633138326535653238353662613864343835313031303362633131 +37373166363261313930316639646663396163646334623931313466663632393835633161393038 +39316363316330393266396564356338353037636632376133373231653864636365373638653438 +35326334386465623536343038336336643162616633343565383334373830656435363138636435 +36303764306235653534353161363162313764336138373332313338396134653134306338343561 +33346339646662636562333834313535316439303265306534353366353662623066363139663933 +39663165313666376362303838343765666162636337326565353761353132613737626539306163 +35383065336430396132663635663631393466663236653564366139663031326136383437383838 +32353239366232396235636132646531663563323661393332336361313738626437626335623463 +38316133646566653830633963613161303637333533363338663130663661656631646263323262 +64623032633866636237623665356436356165366165653666656161393865643931643730613664 +35663832363037653931313635313638343764316635633031616439626230323337303335623234 +32646330326632373738663465666236666565396162613361326464313965393830643237643865 +61653538333330613835623461353366633433336138653535336239343933643563633363656638 
+38343966336630643030303665333563353661373064363934633566353363663334653939326263 +66623034633564383833653366323532386664633730376131656366613637363433376131356430 +64376337313930633030343230323662633762363538623331373035373166393432653836663836 +38303834313663383465376432656661343432373139653066323637643061323231383064356131 +32653065646636383534653462353534643931353035346432663266623431363066303064623435 +37613264616430386438616538643035623632336237656535343936376363616431633264303933 +34663465393337376539303431623666626339353237346338376637666331623762373132613866 +39353065653630373635383535316539303530353433376433653932636331623739633862616265 +63636163363765613730343061323930653735336364343239353633383461636133333065616632 +63396562333733306538353533646332353966396330616334636639373163323539353231623766 +35396261393066326232346330616133626634313964626531633234663637376238326666323561 +65383366373963323734366332633865623536623064643239366561623262633162353461326137 +38633862303932316362356366393861303366363335356134363638313533343434306161386261 +34323733666662313962613835613537393432623836633730306535366361336265646534353834 +64346461386630646130616663313035346232383533663863613364653461656564313834303961 +32346337346165313464326332393435356434653138343130363263396238343034646635343937 +30353361316435313634613930316237626162623562653036353966393362666438303637303333 +32383130323630336461323835373863396137343231306536613038393437623937636566376639 +36626634393035356136643831316664306662653061616464633237616566323437376634303634 +32633462383332663635623334353263313464613535323861313863343036653338343033303238 +39356239623163663330663131393334633961313066663266646631333464643366663637383434 +65323264376166666162346336383736666133666163356131326633336464613961653562663462 +63313935366166303061663066396532313830373936663865383132343466353233343165343961 +61343330616666323939643238636462353531343664343938366135323961366661323066663466 +35313635616637646336613830346165646664316464643266393665656465666263306662623062 +34306465636337633733643434373536393335363862316166373062353432626161353830336133 +37663531386463313334366634623533376131316138323337303738643238656661396633323838 +66663633306134353864643163363735303532373866323534333132373438383738366535353136 +66366262626636316435623436356263313037626431626133326339396164656535326563373932 +39373232306661356338333139313335323634636565303631356163383935663432303133346465 +37356234633362376633393265363736333132386432653961653536383136616236333263333263 +33356131396439393164633466303366343563643735343333336165636335616639663862656437 +37396262343131356665376638623236383634366532623064636636376638643133306666623733 +62343936653937663639646661353933306562643530393938313835313563343635333738303461 +38623534616363313862313366623762646531373262326666313736616461616232336537333037 +39643030333737643831333866656435663430313864656261333233336530326363653532376336 +63653532396539643966663230333435353533363239656561336531343231636362376538313037 +36633666316464376139643563313664343738373064393562633262393439393366393564666231 +34323731303839393266303465323766613864396461386465653739366634383461656537653732 +33313136366439623636636438363566333939303263346637353163613834396162303331663561 +35343237613031373065333636366336613732326662303463323461623839353439613132666563 +36646631393632613237663464373835636333366434356265363537653265383163653833663461 +31396665383334343966653166393762323837303735323366393335356230303033666232313361 
+64383962636636643630326533393331313064346165313833333937303538343062653266303334 +66303063376265666538633565343166636562653639363533376637666631393764613438363333 +38636537363664643863613333633236663435323537303934613433386437666366333334363964 +34383738323339643836626562666566376336666262623736346535343639323737353163623439 +62633732353236373032646533633665313361653538396232616234623663663365396536633237 +61626366353062313665613836346233346631633131363061656662363864333065363234316230 +30613364343131366532356263313863333130653266623130636235303961353630376663396131 +32613633346434373630653663653536383933356133643666343966383532326236333537336135 +61633636653936303662646362333463653139366138373734356134326534656633316533393535 +66306337323164363334346663626663346535656137353534333731396537393835366261323233 +36633033613938386437386665366462323235656531656461313064333064393264366239346462 +38313737323962363465386435636539656432303162323665346531643139643438363630653538 +35636430336366353263396232316333396434616438613463313634633138616336306633643061 +63386335306261633739303532626261323566653762636262363430386134643735383937313136 +34643938356136623133623665363963623530363535353139653733393232333736383337383662 +35316237343935616538633861646238343438396131623061396232393331333038373432643465 +35643835396235323735636332383261303530653733613935646466626330333731323065303930 +33653061373765363439306464323761303464393136613864616665323837336664383238666634 +30396336303538313232633236326431313065663234323161653062363836323633363735366135 +35643562303534343832373632633962636636653562333666333563626166326435383732306332 +30383362393135646337616131633330393632613237333037353531323830363237643330643161 +66626563636635623464363533346466646133383538313730363538336637626538333830393164 +36343063346433633439313733303865383530336663636663643733396230333837353237313062 +30626330616234303039633736393161303863343234396262623436306136316366306432663930 +61346530633865396365396139613639326530353639333036313437383063633235366537626235 +62616365653761663566616133366536313338376162356662656432643532636633363838633637 +37643364643061656136323436626564363135636534383862613765616335323931643233393863 +61396463316564656136626365313065353038343936366134616136396461616265623331333633 +31613261656639333930653132633933336630663066613331386535373335333339313230616361 +33386535623363646631646262386463343031643138616464633961616137636633356238333864 +66633338343166323034333936396162663366373765353233393762373335656465656261343663 +63346330393161343236376665313639386136353265393431383563646665393462323336646263 +32616234376239386263613034626661383962656637363236323831633531313933386666313435 +39336539656530373137616138303361653331643637393066323665373132326162613461346434 +32616130386131663631396633353135323164333931393939386637356637303763663638376466 +35616230343862323037646139353838623031313361616265396136626561636338383063336238 +30393536373261396233373439633132303238323636396131386137306237643936636330353133 +38626135356238663536353733623337393061333465626531626232376430636231373162333463 +31666163316637303462663262313039663666383431373264323163663134636430663233346664 +36326636633038616531386334613762613736643038626335623835343864356366316266343131 +66383939636332613766363565346336386134306566616365336234383331383466356539306433 +64346138653536316336343931343538353235303565343663383866653139333132363035623465 +62363835303765643132643239376233386330383530373530386461663565613030303665396339 
+[ansible-vault ciphertext: several hundred lines of hex-armored encrypted payload elided]
diff --git a/fdio.infra.ansible/vault_pass b/fdio.infra.ansible/vault_pass
new file mode 100644
index 0000000000..e1d46efc1f
--- /dev/null
+++ b/fdio.infra.ansible/vault_pass
@@ -0,0 +1 @@
+Csit1234
diff --git a/fdio.infra.ansible/vpp_device.yaml b/fdio.infra.ansible/vpp_device.yaml
new file mode 100644
index 0000000000..2ffea31f6b
--- /dev/null
+++ b/fdio.infra.ansible/vpp_device.yaml
@@ -0,0 +1,40 @@
+---
+# file: vpp_device.yaml

+- hosts: vpp_device
+  remote_user: testuser
+  become: yes
+  become_user: root
+  gather_facts: false
+  pre_tasks:
+    - name: Gathering Facts
+      gather_facts:
+      tags:
+        - always
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: baremetal
+      tags: baremetal
+    - role: common
+      tags: common
+    - role: docker
+      tags: docker
+    - role: nomad
+      tags: nomad
+    - role: consul
+      tags: consul
+    - role: prometheus_exporter
+      tags: prometheus_exporter
+    - role: jenkins_job_health_exporter
+      tags: jenkins_job_health_exporter
+    - role: cadvisor
+      tags: cadvisor
+    - role: vpp_device
+      tags: vpp_device
+    - role: kernel_vm
+      tags: kernel_vm
+    - role: csit_sut_image
+      tags: csit_sut_image
+    - role: cleanup
+      tags: cleanup
--
cgit 1.2.3-korg
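
The vault_pass file added above holds the plaintext Ansible Vault password, so vault-encrypted files in this tree can be viewed or edited non-interactively. A minimal sketch, assuming a hypothetical encrypted-file path (no specific encrypted file is named in this hunk):

    # inspect or edit vault-encrypted content using the committed password file
    $ ansible-vault view --vault-password-file fdio.infra.ansible/vault_pass <encrypted_file.yaml>
    $ ansible-vault edit --vault-password-file fdio.infra.ansible/vault_pass <encrypted_file.yaml>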
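
Every role in the vpp_device.yaml play carries a tag matching its own name, so a subset of roles can be re-applied without running the whole play. A hedged example invocation, with placeholder inventory and host values:

    # re-run only the docker and nomad roles on a single host (inventory path and host are illustrative)
    $ ansible-playbook -i <inventory_path> fdio.infra.ansible/vpp_device.yaml --tags docker,nomad --limit <host_address>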