path: root/resources/tools/testbed-setup/ansible
author     pmikus <pmikus@cisco.com>          2020-06-08 06:43:19 +0000
committer  Peter Mikus <pmikus@cisco.com>     2020-06-09 15:26:42 +0000
commit     e6be7e0ec3a3626b3741eeac81ec4a25a723850b (patch)
tree       1d61767c5605df323866425321747cb972d9412a /resources/tools/testbed-setup/ansible
parent     be5265bc9612f5aedf4f25720c4bd8941e359410 (diff)
Infra: Ansible Nomad
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I2560e3e12b457413db81a7ecc52efd7e39f1aea2
Diffstat (limited to 'resources/tools/testbed-setup/ansible')
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml   39
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml   39
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml   15
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml   17
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml   15
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml   14
-rw-r--r--  resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts                         2
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml                            1
-rw-r--r--  resources/tools/testbed-setup/ansible/roles/nomad/templates/client.hcl.j2                    4
9 files changed, 145 insertions(+), 1 deletion(-)
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml
new file mode 100644
index 0000000000..d0afca9164
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.34.yaml
@@ -0,0 +1,39 @@
+---
+# file: host_vars/10.30.51.34.yaml
+
+hostname: "s44-nomad"
+inventory_cimc_hostname: "10.30.50.34"
+
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
+# Nomad settings.
+nomad_certificates:
+ - src: "{{ vault_nomad_v2_ca_file }}"
+ dest: "{{ nomad_ca_file }}"
+ - src: "{{ vault_nomad_v2_cert_file }}"
+ dest: "{{ nomad_cert_file }}"
+ - src: "{{ vault_nomad_v2_key_file }}"
+ dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+ driver.raw_exec.enable: 1
+ docker.cleanup.image: false
+ docker.privileged.enabled: true
+ driver.whitelist: "docker,raw_exec,exec"
+nomad_retry_servers: [ "10.30.51.32", "10.30.51.33" ]
+nomad_servers: [ "10.30.51.33:4647" ]
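
Note: the two new host_vars files above (and the extended ThunderX2 files below) share the same `users` pattern: each entry carries a username, supplementary groups, a pre-hashed password and a list of authorized SSH public keys. A minimal sketch of how a user-management role could consume this structure is shown here; the task layout is an assumption for illustration and is not taken from this change:

    # Hypothetical consumer of the `users` host variable (not part of this diff).
    - name: Ensure local users from host_vars exist
      user:
        name: "{{ item.username }}"
        groups: "{{ item.groups | join(',') }}"
        password: "{{ item.password }}"
        shell: "/bin/bash"
        state: present
      loop: "{{ users }}"

    - name: Ensure authorized SSH keys are installed for each user
      authorized_key:
        user: "{{ item.0.username }}"
        key: "{{ item.1 }}"
      loop: "{{ users | subelements('ssh_key') }}"
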
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml
new file mode 100644
index 0000000000..75980daf86
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.35.yaml
@@ -0,0 +1,39 @@
+---
+# file: host_vars/10.30.51.35.yaml
+
+hostname: "s45-nomad"
+inventory_cimc_hostname: "10.30.50.35"
+
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
+# Nomad settings.
+nomad_certificates:
+ - src: "{{ vault_nomad_v2_ca_file }}"
+ dest: "{{ nomad_ca_file }}"
+ - src: "{{ vault_nomad_v2_cert_file }}"
+ dest: "{{ nomad_cert_file }}"
+ - src: "{{ vault_nomad_v2_key_file }}"
+ dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+ driver.raw_exec.enable: 1
+ docker.cleanup.image: false
+ docker.privileged.enabled: true
+ driver.whitelist: "docker,raw_exec,exec"
+nomad_retry_servers: [ "10.30.51.32", "10.30.51.33" ]
+nomad_servers: [ "10.30.51.33:4647" ]
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml
index 8ebe3a1a25..16e74c2bc4 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.66.yaml
@@ -2,8 +2,22 @@
# file: host_vars/10.30.51.66.yaml
hostname: "s51-nomad"
+inventory_ipmi_hostname: "10.30.50.66"
cpu_microarchitecture: "thunderx2"
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
# Nomad settings.
nomad_certificates:
- src: "{{ vault_nomad_v1_ca_file }}"
@@ -22,3 +36,4 @@ nomad_options:
docker.privileged.enabled: true
driver.whitelist: "docker,raw_exec,exec"
nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml
index fad127ced5..e136211469 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.67.yaml
@@ -1,9 +1,23 @@
-i---
+---
# file: host_vars/10.30.51.67.yaml
hostname: "s49-nomad"
+inventory_ipmi_hostname: "10.30.50.67"
cpu_microarchitecture: "thunderx2"
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
# Nomad settings.
nomad_certificates:
- src: "{{ vault_nomad_v1_ca_file }}"
@@ -22,3 +36,4 @@ nomad_options:
docker.privileged.enabled: true
driver.whitelist: "docker,raw_exec,exec"
nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml
index 490eb5675b..aed0040301 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.68.yaml
@@ -2,8 +2,22 @@
# file: host_vars/10.30.51.68.yaml
hostname: "s50-nomad"
+inventory_ipmi_hostname: "10.30.50.68"
cpu_microarchitecture: "thunderx2"
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
# Nomad settings.
nomad_certificates:
- src: "{{ vault_nomad_v1_ca_file }}"
@@ -22,3 +36,4 @@ nomad_options:
docker.privileged.enabled: true
driver.whitelist: "docker,raw_exec,exec"
nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647" ]
+nomad_cpu_total_compute: "40000"
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml
index 1d9062c5f6..a60098d13b 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.69.yaml
@@ -2,11 +2,25 @@
# file: host_vars/10.30.51.69.yaml
hostname: "s27-t13-sut1"
+inventory_ipmi_hostname: "10.30.50.69"
vfs_data_file: "csit-initialize-vfs-tx2.sh"
grub:
nr_hugepages: 57344
cpu_microarchitecture: "thunderx2"
+# User management.
+users:
+ - username: localadmin
+ groups: [adm, sudo]
+ password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+ - username: testuser
+ groups: [adm, sudo]
+ password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+ ssh_key:
+ - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
# Nomad settings.
nomad_certificates:
- src: "{{ vault_nomad_v1_ca_file }}"
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
index 7a12a2b184..3c8076d00e 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
@@ -52,6 +52,8 @@ all:
10.30.51.30: #s40-nomad - haswell
10.30.51.32: #s42-nomad - ivy bridge
10.30.51.33: #s43-nomad - ivy bridge
+ 10.30.51.34: #s44-nomad - ivy bridge
+ 10.30.51.35: #s45-nomad - ivy bridge
10.32.8.14: #s46-nomad - skylake
10.32.8.15: #s47-nomad - skylake
10.32.8.16: #s48-nomad - skylake
diff --git a/resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml
index db9b104c36..8233ba7113 100644
--- a/resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml
+++ b/resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml
@@ -13,6 +13,7 @@
get_url:
url: "https://downloads.es.net/pub/iperf/iperf-{{ item }}.tar.gz"
dest: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz"
+ validate_certs: false
mode: 0644
loop: "{{ iperf_version }}"
tags:
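
Note: the single added line disables TLS certificate validation for the iperf source download performed by get_url. For context, the complete task after this change would look roughly like the sketch below; the task name and tag value are assumptions, since the hunk shows only the surrounding lines:

    # Sketch of the full task after this change (task name and tag are assumed).
    - name: Download iperf sources
      get_url:
        url: "https://downloads.es.net/pub/iperf/iperf-{{ item }}.tar.gz"
        dest: "{{ iperf_target_dir }}/iperf-{{ item }}.tar.gz"
        validate_certs: false   # do not verify the server TLS certificate for this download
        mode: 0644
      loop: "{{ iperf_version }}"
      tags:
        - install-iperf
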
diff --git a/resources/tools/testbed-setup/ansible/roles/nomad/templates/client.hcl.j2 b/resources/tools/testbed-setup/ansible/roles/nomad/templates/client.hcl.j2
index c097d214a5..f15616144f 100644
--- a/resources/tools/testbed-setup/ansible/roles/nomad/templates/client.hcl.j2
+++ b/resources/tools/testbed-setup/ansible/roles/nomad/templates/client.hcl.j2
@@ -3,6 +3,10 @@ client {
no_host_uuid = {{ nomad_no_host_uuid | bool | lower }}
node_class = "{{ nomad_node_class }}"
+ {% if nomad_cpu_total_compute is defined -%}
+ cpu_total_compute = {{ nomad_cpu_total_compute }}
+ {% endif -%}
+
{% if nomad_servers -%}
servers = [ {% for ip_port in nomad_servers -%} "{{ ip_port }}" {% if not loop.last %},{% endif %}{%- endfor -%} ]
{% endif %}
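
Note: with the template change above, cpu_total_compute is rendered into client.hcl only when nomad_cpu_total_compute is defined for the host, which is how the ThunderX2 nodes (10.30.51.66, .67 and .68) override Nomad's fingerprinted CPU capacity with a fixed total of 40000 (interpreted by Nomad as MHz). A sketch of the resulting client stanza for such a host is below; fields other than cpu_total_compute and servers are illustrative assumptions, as they are not visible in this diff:

    client {
      # Rendered from client.hcl.j2 for a host with nomad_cpu_total_compute set (illustrative values).
      node_class        = "builder"
      cpu_total_compute = 40000
      servers           = [ "10.30.51.32:4647", "10.30.51.33:4647" ]
    }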