Diffstat (limited to 'resources/tools/terraform')
-rw-r--r--  resources/tools/terraform/1n_nmd/.gitignore                            |   4
-rw-r--r--  resources/tools/terraform/1n_nmd/main.tf                               |  40
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad         | 270
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad       | 256
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_storage/resources.tf             |   9
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad  | 171
-rw-r--r--  resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf          |   4
-rw-r--r--  resources/tools/terraform/1n_nmd/variables.tf                          |   5
-rw-r--r--  resources/tools/terraform/2n_aws_c5n/.gitignore                        |   4
-rw-r--r--  resources/tools/terraform/2n_aws_c5n/main.tf                           | 304
-rw-r--r--  resources/tools/terraform/2n_aws_c5n/nic.tf                            |  67
-rw-r--r--  resources/tools/terraform/3n_aws_c5n/.gitignore                        |   4
-rw-r--r--  resources/tools/terraform/3n_aws_c5n/main.tf                           | 361
-rw-r--r--  resources/tools/terraform/3n_aws_c5n/nic.tf                            | 101
-rw-r--r--  resources/tools/terraform/3n_azure_fsv2/.gitignore                     |   4
-rw-r--r--  resources/tools/terraform/3n_azure_fsv2/main.tf                        | 593
-rw-r--r--  resources/tools/terraform/3n_azure_fsv2/nic.tf                         | 133
17 files changed, 0 insertions(+), 2330 deletions(-)
diff --git a/resources/tools/terraform/1n_nmd/.gitignore b/resources/tools/terraform/1n_nmd/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/1n_nmd/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
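
Editor's note: these ignore rules keep local Terraform state, state backups, and the lock info out of version control. A minimal sketch of the usual complement, assuming a hypothetical S3 bucket (the original modules keep state local):

    terraform {
      backend "s3" {
        bucket = "csit-terraform-state"        # hypothetical bucket name
        key    = "1n_nmd/terraform.tfstate"
        region = "eu-central-1"
      }
    }
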
diff --git a/resources/tools/terraform/1n_nmd/main.tf b/resources/tools/terraform/1n_nmd/main.tf
deleted file mode 100644
index 330f647476..0000000000
--- a/resources/tools/terraform/1n_nmd/main.tf
+++ /dev/null
@@ -1,40 +0,0 @@
-terraform {
- # This module is now only being tested with Terraform 0.13.5+.
- required_version = ">= 0.13.5"
-}
-
-provider "nomad" {
- address = var.nomad_provider_address
- alias = "yul1"
-}
-
-# For convenience in simple configurations, a child module automatically
-# inherits default (un-aliased) provider configurations from its parent.
-# This means that explicit provider blocks appear only in the root module,
-# and downstream modules can simply declare resources for that provider
-# and have them automatically associated with the root provider
-# configurations.
-
-# prod_storage
-# + prod-group1-nginx
-# + prod-group1-storage
-# + services
-# + docs.nginx.service.consul
-# + logs.nginx.service.consul
-# + storage.nginx.service.consul
-module "prod_storage" {
- source = "./prod_storage"
- providers = {
- nomad = nomad.yul1
- }
-}
-
-# prod_vpp_device
-# + prod-csit-shim-amd
-# + prod-csit-shim-arm
-module "prod_vpp_device" {
- source = "./prod_vpp_device"
- providers = {
- nomad = nomad.yul1
- }
-}
\ No newline at end of file
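
Editor's note: the root module passes its aliased provider to each child via providers = { nomad = nomad.yul1 }, so the children consume it as their default "nomad" provider. A minimal sketch of what a child module could declare to make that requirement explicit, assuming Terraform 0.13 syntax (the original child modules omit this block):

    terraform {
      required_providers {
        nomad = {
          source = "hashicorp/nomad"
        }
      }
    }
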
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
deleted file mode 100644
index 2af62a06c3..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
+++ /dev/null
@@ -1,270 +0,0 @@
-job "prod-nginx" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = [ "yul1" ]
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = 0
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = false
-
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = 0
- }
-
- # The reschedule stanza specifies the group's rescheduling strategy. If
- # specified at the job level, the configuration will apply to all groups
- # within the job. If the reschedule stanza is present on both the job and the
- # group, they are merged with the group stanza taking the highest precedence
- # and then the job.
- reschedule {
- delay = "30s"
- delay_function = "constant"
- unlimited = true
- }
-
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-nginx" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = 1
-
- # The restart stanza configures a task's behavior on task failure. Restarts
- # happen on the client that is running the task.
- restart {
- interval = "10m"
- attempts = 2
- delay = "15s"
- mode = "fail"
- }
-
- # The "distinct_hosts" constraint is declared here but disabled
- # (value = "false"), so groups of this job may share a host.
- constraint {
- operator = "distinct_hosts"
- value = "false"
- }
-
- # Prioritize one node.
- affinity {
- attribute = "${attr.unique.hostname}"
- value = "s46-nomad"
- weight = 100
- }
-
- # The volume stanza allows the group to specify that it requires a given
- # volume from the cluster.
- #
- # For more information and examples on the "volume" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/volume
- volume "prod-volume1-storage" {
- type = "host"
- read_only = false
- source = "prod-volume-data1-1"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-nginx" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- volume_mount {
- volume = "prod-volume1-storage"
- destination = "/data/"
- read_only = true
- }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "nginx:stable"
- dns_servers = [ "${attr.unique.network.ip-address}" ]
- port_map {
- https = 443
- }
- privileged = false
- volumes = [
- "/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem",
- "/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem",
- "custom/logs.conf:/etc/nginx/conf.d/logs.conf",
- "custom/docs.conf:/etc/nginx/conf.d/docs.conf"
- ]
- }
-
- # The "template" stanza instructs Nomad to manage a template, such as
- # a configuration file or script. This template can optionally pull data
- # from Consul or Vault to populate runtime configuration data.
- #
- # For more information and examples on the "template" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/template.html
- #
- template {
- data = <<EOH
- server {
- listen 443 ssl default_server;
- server_name logs.nginx.service.consul;
- keepalive_timeout 70;
- ssl_session_cache shared:SSL:10m;
- ssl_session_timeout 10m;
- ssl_protocols TLSv1.2;
- ssl_prefer_server_ciphers on;
- ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384";
- ssl_certificate /etc/ssl/certs/nginx-cert.pem;
- ssl_certificate_key /etc/ssl/private/nginx-key.pem;
- location / {
- root /data/logs.fd.io;
- index _;
- autoindex on;
- autoindex_exact_size on;
- autoindex_format html;
- autoindex_localtime off;
- }
- location ~ \.(html.gz)$ {
- root /data/logs.fd.io;
- add_header Content-Encoding gzip;
- add_header Content-Type text/html;
- }
- location ~ \.(txt.gz|log.gz)$ {
- root /data/logs.fd.io;
- add_header Content-Encoding gzip;
- add_header Content-Type text/plain;
- }
- location ~ \.(xml.gz)$ {
- root /data/logs.fd.io;
- add_header Content-Encoding gzip;
- add_header Content-Type application/xml;
- }
- }
- EOH
- destination = "custom/logs.conf"
- }
- template {
- data = <<EOH
- server {
- listen 443 ssl;
- server_name docs.nginx.service.consul;
- keepalive_timeout 70;
- ssl_session_cache shared:SSL:10m;
- ssl_session_timeout 10m;
- ssl_protocols TLSv1.2;
- ssl_prefer_server_ciphers on;
- ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384";
- ssl_certificate /etc/ssl/certs/nginx-cert.pem;
- ssl_certificate_key /etc/ssl/private/nginx-key.pem;
- location / {
- root /data/docs.fd.io;
- index index.html index.htm;
- }
- }
- EOH
- destination = "custom/docs.conf"
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # For more information and examples on the "service" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/service.html
- #
- service {
- name = "nginx"
- port = "https"
- tags = [ "docs", "logs" ]
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 1000
- memory = 1024
- network {
- mode = "bridge"
- port "https" {
- static = 443
- }
- }
- }
- }
- }
-}
\ No newline at end of file
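
Editor's note: unlike the storage job below, the nginx service is registered in Consul without a health check. A hedged sketch of what one could look like, assuming a simple TCP liveness probe (the check block is not in the original):

    service {
      name = "nginx"
      port = "https"
      tags = [ "docs", "logs" ]
      check {
        name     = "alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }
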
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad b/resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad
deleted file mode 100644
index 4e8f7ecb98..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad
+++ /dev/null
@@ -1,256 +0,0 @@
-job "prod-storage" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = [ "yul1" ]
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = 0
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = false
-
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = 0
- }
-
- # All groups in this job should be scheduled on different hosts.
- constraint {
- operator = "distinct_hosts"
- value = "true"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-storage" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = 2
-
- # Hard-code the preferred node as primary.
- affinity {
- attribute = "${attr.unique.hostname}"
- value = "s46-nomad"
- weight = 100
- }
-
- # https://www.nomadproject.io/docs/job-specification/volume
- volume "prod-volume1-storage" {
- type = "host"
- read_only = false
- source = "prod-volume-data1-1"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-storage" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- volume_mount {
- volume = "prod-volume1-storage"
- destination = "/data/"
- read_only = false
- }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "minio/minio:RELEASE.2020-11-19T23-48-16Z"
- dns_servers = [ "${attr.unique.network.ip-address}" ]
- command = "server"
- args = [ "/data/" ]
- port_map {
- http = 9000
- }
- privileged = false
- }
-
- env {
- MINIO_ACCESS_KEY = "minio"
- MINIO_SECRET_KEY = "minio123"
- MINIO_BROWSER = "off"
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # For more information and examples on the "service" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/service.html
- #
- service {
- name = "storage"
- port = "http"
- tags = [ "storage${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "alive"
- type = "http"
- port = "http"
- protocol = "http"
- method = "GET"
- path = "/minio/health/live"
- interval = "10s"
- timeout = "2s"
- task = "${TASK}"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 2000
- memory = 2048
- network {
- port "http" {
- static = 9000
- }
- }
- }
- }
-
- task "prod-task2-sync" {
- # The "raw_exec" parameter specifies the task driver that should be used
- # to run the task.
- driver = "raw_exec"
-
- # The "template" stanza instructs Nomad to manage a template, such as
- # a configuration file or script. This template can optionally pull data
- # from Consul or Vault to populate runtime configuration data.
- #
- # For more information and examples on the "template" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/template.html
- #
- template {
- data = <<EOH
-#!/bin/bash
-
-INOTIFY_OPTIONS="--recursive --monitor"
-VOLUMES="/data/logs.fd.io /data/docs.fd.io"
-
-if [ '{{ env "attr.unique.network.ip-address" }}' = "10.32.8.14" ]; then
-echo "Running notify daemon"
- inotifywait -e moved_to ${INOTIFY_OPTONS} ${VOLUMES} | \
- while read path action file; do
- key="testuser"
- secret="Csit1234"
-
- resource=${path#"/data"}${file}
- date=$(date -R)
- _signature="PUT\n\napplication/octet-stream\n${date}\n${resource}"
- signature=$(echo -en ${_signature} | openssl sha1 -hmac ${secret} -binary | base64)
-
- curl -v -X PUT -T "${path}${file}" \
- -H "Host: storage0.storage.service.consul:9000" \
- -H "Date: ${date}" \
- -H "Content-Type: application/octet-stream" \
- -H "Authorization: AWS ${key}:${signature}" \
- http://storage0.storage.service.consul:9000${resource}
- done
-else
- while :; do sleep 2073600; done
-fi
-
-EOH
- destination = "local/sync.sh"
- perms = "755"
- }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- command = "local/sync.sh"
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 500
- memory = 256
- }
- }
- }
-}
\ No newline at end of file
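
Editor's note: the volume stanzas above only claim the host volume; the volume itself must be defined in each Nomad client's configuration. A minimal client-side sketch, assuming a hypothetical /data path backing "prod-volume-data1-1":

    client {
      enabled = true
      host_volume "prod-volume-data1-1" {
        path      = "/data"             # assumed host path
        read_only = false
      }
    }
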
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/resources.tf b/resources/tools/terraform/1n_nmd/prod_storage/resources.tf
deleted file mode 100644
index 4c42927be6..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_storage/resources.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-resource "nomad_job" "prod_nginx" {
- provider = nomad
- jobspec = file("${path.module}/prod-nginx.nomad")
-}
-
-resource "nomad_job" "prod_storage" {
- provider = nomad
- jobspec = file("${path.module}/prod-storage.nomad")
-}
\ No newline at end of file
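
Editor's note: file() loads each jobspec verbatim, so any change means editing the .nomad file itself. If per-environment values were needed, one hedged variant is templatefile() with a hypothetical .tpl copy of the jobspec; the original does not do this:

    resource "nomad_job" "prod_storage" {
      provider = nomad
      jobspec  = templatefile("${path.module}/prod-storage.nomad.tpl", {
        datacenter = "yul1"             # hypothetical template variable
      })
    }
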
diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad b/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad
deleted file mode 100644
index 328f503a0b..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad
+++ /dev/null
@@ -1,171 +0,0 @@
-job "prod-csit-shim" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = [ "yul1" ]
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "system"
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-csit-shim-amd" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = 1
-
- constraint {
- attribute = "${node.class}"
- value = "csit"
- }
-
- restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-csit-shim-amd" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "csit_shim-ubuntu1804:local"
- network_mode = "host"
- pid_mode = "host"
- volumes = [
- "/var/run/docker.sock:/var/run/docker.sock"
- ]
- privileged = true
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 100
- memory = 128
- network {
- mbits = 10
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
- }
- }
- }
-
- group "prod-group1-csit-shim-arm" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = 1
-
- constraint {
- attribute = "${node.class}"
- value = "csitarm"
- }
-
- restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-csit-shim-arm" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "csit_shim-ubuntu1804:local"
- network_mode = "host"
- pid_mode = "host"
- volumes = [
- "/var/run/docker.sock:/var/run/docker.sock"
- ]
- privileged = true
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = 100
- memory = 128
- network {
- mbits = 10
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
- }
- }
- }
-}
\ No newline at end of file
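
Editor's note: placement of the amd and arm groups is driven entirely by the ${node.class} constraints ("csit" and "csitarm"). The class is not set by the jobspec; it comes from each Nomad client's own configuration, roughly as in this sketch (values inferred from the constraints above):

    client {
      enabled    = true
      node_class = "csitarm"            # "csit" on the amd hosts
    }
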
diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf b/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
deleted file mode 100644
index dace9094f2..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-resource "nomad_job" "prod_csit_shim" {
- provider = nomad
- jobspec = file("${path.module}/prod_csit_shim.nomad")
-}
\ No newline at end of file
diff --git a/resources/tools/terraform/1n_nmd/variables.tf b/resources/tools/terraform/1n_nmd/variables.tf
deleted file mode 100644
index 0782f8669c..0000000000
--- a/resources/tools/terraform/1n_nmd/variables.tf
+++ /dev/null
@@ -1,5 +0,0 @@
-variable "nomad_provider_address" {
- description = "FD.io Nomad cluster address."
- type = string
- default = "http://nomad.service.consul:4646"
-}
\ No newline at end of file
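
Editor's note: the default address resolves through Consul DNS. Overriding it needs no module edits; a hedged terraform.tfvars example with a hypothetical address:

    # terraform.tfvars
    nomad_provider_address = "http://10.30.51.30:4646"   # hypothetical address
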
diff --git a/resources/tools/terraform/2n_aws_c5n/.gitignore b/resources/tools/terraform/2n_aws_c5n/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/2n_aws_c5n/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
diff --git a/resources/tools/terraform/2n_aws_c5n/main.tf b/resources/tools/terraform/2n_aws_c5n/main.tf
deleted file mode 100644
index c0da7a487e..0000000000
--- a/resources/tools/terraform/2n_aws_c5n/main.tf
+++ /dev/null
@@ -1,304 +0,0 @@
-provider "aws" {
- region = "eu-central-1"
-}
-
-variable "avail_zone" {
- type = string
- default = "eu-central-1a"
-}
-# Base VPC CIDRs
-variable "vpc_cidr_mgmt" {
- type = string
- default = "192.168.0.0/24"
-}
-variable "vpc_cidr_b" {
- type = string
- default = "192.168.10.0/24"
-}
-variable "vpc_cidr_c" {
- type = string
- default = "200.0.0.0/24"
-}
-variable "vpc_cidr_d" {
- type = string
- default = "192.168.20.0/24"
-}
-
-# TRex dummy CIDRs
-variable "trex_dummy_cidr_port_0" {
- type = string
- default = "10.0.0.0/24"
-}
-variable "trex_dummy_cidr_port_1" {
- type = string
- default = "20.0.0.0/24"
-}
-
-# IPs
-variable "tg_if1_ip" {
- type = string
- default = "192.168.10.254"
-}
-variable "tg_if2_ip" {
- type = string
- default = "192.168.20.254"
-}
-variable "dut1_if1_ip" {
- type = string
- default = "192.168.10.11"
-}
-variable "dut1_if2_ip" {
- type = string
- default = "192.168.20.11"
-}
-variable "tg_mgmt_ip" {
- type = string
- default = "192.168.0.10"
-}
-variable "dut1_mgmt_ip" {
- type = string
- default = "192.168.0.11"
-}
-
-# Instance Type
-variable "instance_type" {
- type = string
- default = "c5n.2xlarge"
-}
-
-resource "aws_vpc" "CSIT" {
- cidr_block = var.vpc_cidr_mgmt
-}
-
-resource "aws_security_group" "CSIT" {
- name = "CSIT"
- description = "Allow inbound traffic"
- vpc_id = aws_vpc.CSIT.id
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- ingress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- self = true
- }
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_vpc_ipv4_cidr_block_association" "b" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_b
- depends_on = [aws_vpc.CSIT]
-}
-resource "aws_vpc_ipv4_cidr_block_association" "c" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_c
- depends_on = [aws_vpc.CSIT]
-}
-resource "aws_vpc_ipv4_cidr_block_association" "d" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_d
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_subnet" "mgmt" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_mgmt
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_subnet" "b" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_b
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.b]
-}
-
-resource "aws_subnet" "c" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_c
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.c]
-}
-
-resource "aws_subnet" "d" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_d
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.d]
-}
-
-resource "aws_internet_gateway" "CSIT" {
- vpc_id = aws_vpc.CSIT.id
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_key_pair" "CSIT" {
- key_name = "CSIT"
- public_key = file("~/.ssh/id_rsa.pub")
-}
-
-data "aws_ami" "ubuntu" {
- most_recent = true
-
- filter {
- name = "name"
- values = ["*hvm-ssd/ubuntu-bionic-18.04-amd64*"]
- }
-
- filter {
- name = "virtualization-type"
- values = ["hvm"]
- }
-
- owners = ["099720109477"] # Canonical
-}
-
-resource "aws_placement_group" "CSIT" {
- name = "CSIT"
- strategy = "cluster"
-}
-
-resource "aws_instance" "tg" {
- ami = data.aws_ami.ubuntu.id
- instance_type = var.instance_type
-# cpu_threads_per_core = 1
-# cpu_core_count = 18
- key_name = aws_key_pair.CSIT.key_name
- associate_public_ip_address = true
- subnet_id = aws_subnet.mgmt.id
- root_block_device {
- volume_size = 50
- }
- private_ip = var.tg_mgmt_ip
- vpc_security_group_ids = [aws_security_group.CSIT.id]
- depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
- placement_group = aws_placement_group.CSIT.id
- source_dest_check = false
-}
-
-resource "aws_instance" "dut1" {
- ami = data.aws_ami.ubuntu.id
-# cpu_threads_per_core = 1
-# cpu_core_count = 18
- instance_type = var.instance_type
- key_name = aws_key_pair.CSIT.key_name
- associate_public_ip_address = true
- subnet_id = aws_subnet.mgmt.id
- root_block_device {
- volume_size = 50
- }
- private_ip = var.dut1_mgmt_ip
- vpc_security_group_ids = [aws_security_group.CSIT.id]
- depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
- placement_group = aws_placement_group.CSIT.id
- source_dest_check = false
-}
-
-resource "aws_route" "CSIT-igw" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- gateway_id = aws_internet_gateway.CSIT.id
- destination_cidr_block = "0.0.0.0/0"
- depends_on = [aws_vpc.CSIT, aws_internet_gateway.CSIT]
-}
-resource "aws_route" "dummy-trex-port-0" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- network_interface_id = aws_instance.tg.primary_network_interface_id
- destination_cidr_block = var.trex_dummy_cidr_port_0
- depends_on = [aws_vpc.CSIT, aws_instance.dut1]
-}
-resource "aws_route" "dummy-trex-port-1" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- network_interface_id = aws_instance.tg.primary_network_interface_id
- destination_cidr_block = var.trex_dummy_cidr_port_1
- depends_on = [aws_vpc.CSIT, aws_instance.dut1]
-}
-
-resource "null_resource" "deploy_tg" {
- depends_on = [ aws_instance.tg ]
- connection {
- user = "ubuntu"
- host = aws_instance.tg.public_ip
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_aws.yaml"
- force_handlers = true
- }
- hosts = ["tg"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- aws = true
- }
- }
- }
-}
-resource "null_resource" "deploy_dut1" {
- depends_on = [ aws_instance.dut1 ]
- connection {
- user = "ubuntu"
- host = aws_instance.dut1.public_ip
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_aws.yaml"
- force_handlers = true
- }
- hosts = ["sut"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- aws = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_topology" {
- depends_on = [ aws_instance.tg, aws_instance.dut1 ]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- cloud_topology = "2n_aws_c5n"
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.aws_network_interface.dut1_if1.mac_address
- dut1_if2_mac = data.aws_network_interface.dut1_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- dut1_public_ip = aws_instance.dut1.public_ip
- }
- }
- }
-}
-
-output "dbg_tg" {
- value = "TG IP: ${aws_instance.tg.public_ip}"
-}
-
-output "dbg_dut1" {
- value = "DUT1 IP: ${aws_instance.dut1.public_ip}"
-}
-
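
Editor's note: unlike 1n_nmd/main.tf, this root module pins neither a Terraform version nor provider versions, and its "ansible" provisioner blocks rely on the third-party terraform-provisioner-ansible plugin rather than a built-in provisioner. A hedged sketch of a version pin it could carry (the bounds are assumptions):

    terraform {
      required_version = ">= 0.13"
      required_providers {
        aws = {
          source  = "hashicorp/aws"
          version = ">= 2.0"
        }
      }
    }
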
diff --git a/resources/tools/terraform/2n_aws_c5n/nic.tf b/resources/tools/terraform/2n_aws_c5n/nic.tf
deleted file mode 100644
index b0a54e9b98..0000000000
--- a/resources/tools/terraform/2n_aws_c5n/nic.tf
+++ /dev/null
@@ -1,67 +0,0 @@
-resource "aws_network_interface" "dut1_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.dut1_if1_ip
- private_ips = [var.dut1_if1_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut1.id
- device_index = 1
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.b]
-}
-
-data "aws_network_interface" "dut1_if1" {
- id = aws_network_interface.dut1_if1.id
-}
-
-resource "aws_network_interface" "dut1_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.dut1_if2_ip
- private_ips = [var.dut1_if2_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut1.id
- device_index = 2
- }
- depends_on = [aws_vpc.CSIT]
-}
-
-data "aws_network_interface" "dut1_if2" {
- id = aws_network_interface.dut1_if2.id
-}
-
-resource "aws_network_interface" "tg_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.tg_if1_ip
- private_ips = [var.tg_if1_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.tg.id
- device_index = 1
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.b]
-}
-
-data "aws_network_interface" "tg_if1" {
- id = aws_network_interface.tg_if1.id
-}
-
-resource "aws_network_interface" "tg_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.tg_if2_ip
- private_ips = [var.tg_if2_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.tg.id
- device_index = 2
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.d]
-}
-
-data "aws_network_interface" "tg_if2" {
- id = aws_network_interface.tg_if2.id
-}
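
Editor's note: each interface resource above is paired with a same-named data source so attributes known only after attachment, such as the MAC address, can be read back; main.tf feeds those MACs into the topology playbook. A minimal sketch of exposing one directly, with a hypothetical output name:

    output "tg_if1_mac" {
      value = data.aws_network_interface.tg_if1.mac_address
    }
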
diff --git a/resources/tools/terraform/3n_aws_c5n/.gitignore b/resources/tools/terraform/3n_aws_c5n/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/3n_aws_c5n/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
diff --git a/resources/tools/terraform/3n_aws_c5n/main.tf b/resources/tools/terraform/3n_aws_c5n/main.tf
deleted file mode 100644
index 9ba2b19abe..0000000000
--- a/resources/tools/terraform/3n_aws_c5n/main.tf
+++ /dev/null
@@ -1,361 +0,0 @@
-provider "aws" {
- region = "eu-central-1"
-}
-
-variable "avail_zone" {
- type = string
- default = "eu-central-1a"
-}
-# Base VPC CIDRs
-variable "vpc_cidr_mgmt" {
- type = string
- default = "192.168.0.0/24"
-}
-variable "vpc_cidr_b" {
- type = string
- default = "192.168.10.0/24"
-}
-variable "vpc_cidr_c" {
- type = string
- default = "200.0.0.0/24"
-}
-variable "vpc_cidr_d" {
- type = string
- default = "192.168.20.0/24"
-}
-
-# TRex dummy CIDRs
-variable "trex_dummy_cidr_port_0" {
- type = string
- default = "10.0.0.0/24"
-}
-variable "trex_dummy_cidr_port_1" {
- type = string
- default = "20.0.0.0/24"
-}
-
-# IPs
-variable "tg_if1_ip" {
- type = string
- default = "192.168.10.254"
-}
-variable "tg_if2_ip" {
- type = string
- default = "192.168.20.254"
-}
-variable "dut1_if1_ip" {
- type = string
- default = "192.168.10.11"
-}
-variable "dut1_if2_ip" {
- type = string
- default = "200.0.0.101"
-}
-variable "dut2_if1_ip" {
- type = string
- default = "200.0.0.102"
-}
-variable "dut2_if2_ip" {
- type = string
- default = "192.168.20.11"
-}
-variable "tg_mgmt_ip" {
- type = string
- default = "192.168.0.10"
-}
-variable "dut1_mgmt_ip" {
- type = string
- default = "192.168.0.11"
-}
-variable "dut2_mgmt_ip" {
- type = string
- default = "192.168.0.12"
-}
-
-# Instance Type
-variable "instance_type" {
- type = string
- default = "c5n.9xlarge"
-}
-
-resource "aws_vpc" "CSIT" {
- cidr_block = var.vpc_cidr_mgmt
-}
-
-resource "aws_security_group" "CSIT" {
- name = "CSIT"
- description = "Allow inbound traffic"
- vpc_id = aws_vpc.CSIT.id
-
- ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- ingress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- self = true
- }
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_vpc_ipv4_cidr_block_association" "b" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_b
- depends_on = [aws_vpc.CSIT]
-}
-resource "aws_vpc_ipv4_cidr_block_association" "c" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_c
- depends_on = [aws_vpc.CSIT]
-}
-resource "aws_vpc_ipv4_cidr_block_association" "d" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_d
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_subnet" "mgmt" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_mgmt
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_subnet" "b" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_b
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.b]
-}
-
-resource "aws_subnet" "c" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_c
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.c]
-}
-
-resource "aws_subnet" "d" {
- vpc_id = aws_vpc.CSIT.id
- cidr_block = var.vpc_cidr_d
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.d]
-}
-
-resource "aws_internet_gateway" "CSIT" {
- vpc_id = aws_vpc.CSIT.id
- depends_on = [aws_vpc.CSIT]
-}
-
-resource "aws_key_pair" "CSIT" {
- key_name = "CSIT"
- public_key = file("~/.ssh/id_rsa.pub")
-}
-
-data "aws_ami" "ubuntu" {
- most_recent = true
-
- filter {
- name = "name"
- values = ["*hvm-ssd/ubuntu-bionic-18.04-amd64*"]
- }
-
- filter {
- name = "virtualization-type"
- values = ["hvm"]
- }
-
- owners = ["099720109477"] # Canonical
-}
-
-resource "aws_placement_group" "CSIT" {
- name = "CSIT"
- strategy = "cluster"
-}
-
-resource "aws_instance" "tg" {
- ami = data.aws_ami.ubuntu.id
- instance_type = var.instance_type
-# cpu_threads_per_core = 1
-# cpu_core_count = 18
- key_name = aws_key_pair.CSIT.key_name
- associate_public_ip_address = true
- subnet_id = aws_subnet.mgmt.id
- root_block_device {
- volume_size = 50
- }
- private_ip = var.tg_mgmt_ip
- vpc_security_group_ids = [aws_security_group.CSIT.id]
- depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
- placement_group = aws_placement_group.CSIT.id
- source_dest_check = false
-}
-
-resource "aws_instance" "dut1" {
- ami = data.aws_ami.ubuntu.id
-# cpu_threads_per_core = 1
-# cpu_core_count = 18
- instance_type = var.instance_type
- key_name = aws_key_pair.CSIT.key_name
- associate_public_ip_address = true
- subnet_id = aws_subnet.mgmt.id
- root_block_device {
- volume_size = 50
- }
- private_ip = var.dut1_mgmt_ip
- vpc_security_group_ids = [aws_security_group.CSIT.id]
- depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
- placement_group = aws_placement_group.CSIT.id
- source_dest_check = false
-}
-
-resource "aws_instance" "dut2" {
- ami = data.aws_ami.ubuntu.id
-# cpu_threads_per_core = 1
-# cpu_core_count = 18
- instance_type = var.instance_type
- key_name = aws_key_pair.CSIT.key_name
- associate_public_ip_address = true
- subnet_id = aws_subnet.mgmt.id
- root_block_device {
- volume_size = 50
- }
- private_ip = var.dut2_mgmt_ip
- vpc_security_group_ids = [aws_security_group.CSIT.id]
- depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
- placement_group = aws_placement_group.CSIT.id
- source_dest_check = false
-}
-
-resource "aws_route" "CSIT-igw" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- gateway_id = aws_internet_gateway.CSIT.id
- destination_cidr_block = "0.0.0.0/0"
- depends_on = [aws_vpc.CSIT, aws_internet_gateway.CSIT]
-}
-resource "aws_route" "dummy-trex-port-0" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- network_interface_id = aws_instance.tg.primary_network_interface_id
- destination_cidr_block = var.trex_dummy_cidr_port_0
- depends_on = [aws_vpc.CSIT, aws_instance.dut1]
-}
-resource "aws_route" "dummy-trex-port-1" {
- route_table_id = aws_vpc.CSIT.main_route_table_id
- network_interface_id = aws_instance.tg.primary_network_interface_id
- destination_cidr_block = var.trex_dummy_cidr_port_1
- depends_on = [aws_vpc.CSIT, aws_instance.dut2]
-}
-
-resource "null_resource" "deploy_tg" {
- depends_on = [ aws_instance.tg ]
- connection {
- user = "ubuntu"
- host = aws_instance.tg.public_ip
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_aws.yaml"
- force_handlers = true
- }
- hosts = ["tg"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- aws = true
- }
- }
- }
-}
-resource "null_resource" "deploy_dut1" {
- depends_on = [ aws_instance.dut1 ]
- connection {
- user = "ubuntu"
- host = aws_instance.dut1.public_ip
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_aws.yaml"
- force_handlers = true
- }
- hosts = ["sut"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- aws = true
- }
- }
- }
-}
-resource "null_resource" "deploy_dut2" {
- depends_on = [ aws_instance.dut2 ]
- connection {
- user = "ubuntu"
- host = aws_instance.dut2.public_ip
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_aws.yaml"
- force_handlers = true
- }
- hosts = ["sut"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- aws = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_topology" {
- depends_on = [ aws_instance.tg, aws_instance.dut1, aws_instance.dut2 ]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- cloud_topology = "3n_aws_c5n"
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.aws_network_interface.dut1_if1.mac_address
- dut1_if2_mac = data.aws_network_interface.dut1_if2.mac_address
- dut2_if1_mac = data.aws_network_interface.dut2_if1.mac_address
- dut2_if2_mac = data.aws_network_interface.dut2_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- dut1_public_ip = aws_instance.dut1.public_ip
- dut2_public_ip = aws_instance.dut2.public_ip
- }
- }
- }
-}
-
-output "dbg_tg" {
- value = "TG IP: ${aws_instance.tg.public_ip}"
-}
-
-output "dbg_dut1" {
- value = "DUT1 IP: ${aws_instance.dut1.public_ip}"
-}
-
-output "dbg_dut2" {
- value = "DUT2 IP: ${aws_instance.dut2.public_ip}"
-}
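
Editor's note: relative to the 2-node variant, this module adds dut2 and gives the DUT-to-DUT link its own CIDR (vpc_cidr_c), forming a TG -> DUT1 -> DUT2 -> TG ring. A hypothetical locals block, not present in the original, that documents that wiring in one place:

    locals {
      links = {
        tg_dut1   = { cidr = var.vpc_cidr_b, a = var.tg_if1_ip,   b = var.dut1_if1_ip }
        dut1_dut2 = { cidr = var.vpc_cidr_c, a = var.dut1_if2_ip, b = var.dut2_if1_ip }
        dut2_tg   = { cidr = var.vpc_cidr_d, a = var.dut2_if2_ip, b = var.tg_if2_ip }
      }
    }
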
diff --git a/resources/tools/terraform/3n_aws_c5n/nic.tf b/resources/tools/terraform/3n_aws_c5n/nic.tf
deleted file mode 100644
index 3efd74fc14..0000000000
--- a/resources/tools/terraform/3n_aws_c5n/nic.tf
+++ /dev/null
@@ -1,101 +0,0 @@
-resource "aws_network_interface" "dut1_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.dut1_if1_ip
- private_ips = [var.dut1_if1_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut1.id
- device_index = 1
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.b]
-}
-
-data "aws_network_interface" "dut1_if1" {
- id = aws_network_interface.dut1_if1.id
-}
-
-resource "aws_network_interface" "dut1_if2" {
- subnet_id = aws_subnet.c.id
- source_dest_check = false
- private_ip = var.dut1_if2_ip
- private_ips = [var.dut1_if2_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut1.id
- device_index = 2
- }
- depends_on = [aws_vpc.CSIT]
-}
-
-data "aws_network_interface" "dut1_if2" {
- id = aws_network_interface.dut1_if2.id
-}
-
-resource "aws_network_interface" "dut2_if1" {
- subnet_id = aws_subnet.c.id
- source_dest_check = false
- private_ip = var.dut2_if1_ip
- private_ips = [var.dut2_if1_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut2.id
- device_index = 1
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.c]
-}
-
-data "aws_network_interface" "dut2_if1" {
- id = aws_network_interface.dut2_if1.id
-}
-
-resource "aws_network_interface" "dut2_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.dut2_if2_ip
- private_ips = [var.dut2_if2_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.dut2.id
- device_index = 2
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.d]
-}
-
-data "aws_network_interface" "dut2_if2" {
- id = aws_network_interface.dut2_if2.id
-}
-
-resource "aws_network_interface" "tg_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.tg_if1_ip
- private_ips = [var.tg_if1_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.tg.id
- device_index = 1
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.b]
-}
-
-data "aws_network_interface" "tg_if1" {
- id = aws_network_interface.tg_if1.id
-}
-
-resource "aws_network_interface" "tg_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.tg_if2_ip
- private_ips = [var.tg_if2_ip]
- security_groups = [aws_security_group.CSIT.id]
- attachment {
- instance = aws_instance.tg.id
- device_index = 2
- }
- depends_on = [aws_vpc.CSIT, aws_subnet.d]
-}
-
-data "aws_network_interface" "tg_if2" {
- id = aws_network_interface.tg_if2.id
-}
diff --git a/resources/tools/terraform/3n_azure_fsv2/.gitignore b/resources/tools/terraform/3n_azure_fsv2/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/3n_azure_fsv2/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
diff --git a/resources/tools/terraform/3n_azure_fsv2/main.tf b/resources/tools/terraform/3n_azure_fsv2/main.tf
deleted file mode 100644
index 9f6739e676..0000000000
--- a/resources/tools/terraform/3n_azure_fsv2/main.tf
+++ /dev/null
@@ -1,593 +0,0 @@
-provider "azurerm" {
- version = ">= 1.4.0"
-}
-
-# Variables
-
-variable "vpc_addr_space_a" {
- type = string
- default = "172.16.0.0/16"
-}
-
-variable "vpc_cidr_a" {
- type = string
- default = "172.16.0.0/24"
-}
-
-variable "vpc_cidr_b" {
- type = string
- default = "172.16.10.0/24"
-}
-
-variable "vpc_cidr_c" {
- type = string
- default = "172.16.200.0/24"
-}
-
-variable "vpc_cidr_d" {
- type = string
- default = "172.16.20.0/24"
-}
-
-variable "trex_dummy_cidr_port_0" {
- type = string
- default = "172.16.11.0/24"
-}
-
-variable "trex_dummy_cidr_port_1" {
- type = string
- default = "172.16.21.0/24"
-}
-
-# Create resource group and resources
-
-resource "azurerm_resource_group" "CSIT" {
- name = "CSIT"
- #location = "East US"
- location = "UK South"
-}
-
-resource "azurerm_virtual_network" "CSIT" {
- name = "CSIT-network"
- resource_group_name = azurerm_resource_group.CSIT.name
- location = azurerm_resource_group.CSIT.location
- address_space = [ var.vpc_addr_space_a ]
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_subnet" "a" {
- name = "subnet_a"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_a
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_subnet" "b" {
- name = "subnet_b"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_b
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_subnet" "c" {
- name = "subnet_c"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_c
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_subnet" "d" {
- name = "subnet_d"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_d
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-# Create a network security group for the CSIT instances
-
-resource "azurerm_network_security_group" "CSIT" {
- name = "CSIT"
- resource_group_name = azurerm_resource_group.CSIT.name
- location = azurerm_resource_group.CSIT.location
- security_rule {
- name = "IpSec"
- priority = 100
- direction = "Inbound"
- access = "Allow"
- protocol = "Udp"
- source_port_range = "*"
- destination_port_range = "500"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "IpSec-NAT"
- priority = 101
- direction = "Inbound"
- access = "Allow"
- protocol = "Udp"
- source_port_range = "*"
- destination_port_range = "4500"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "SSH"
- priority = 102
- direction = "Inbound"
- access = "Allow"
- protocol = "Tcp"
- source_port_range = "*"
- destination_port_range = "22"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "InboundAll"
- priority = 103
- direction = "Inbound"
- access = "Allow"
- protocol = "*"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "Outbound"
- priority = 104
- direction = "Outbound"
- access = "Allow"
- protocol = "*"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- depends_on = [azurerm_virtual_network.CSIT]
-}
-
-# Create public IPs
-
-resource "azurerm_public_ip" "tg_public_ip" {
- name = "tg_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_public_ip" "dut1_public_ip" {
- name = "dut1_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-resource "azurerm_public_ip" "dut2_public_ip" {
- name = "dut2_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [ azurerm_resource_group.CSIT ]
-}
-
-# Create network interface
-
-resource "azurerm_network_interface" "tg_mng" {
- name = "tg_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "tg_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.10"
- public_ip_address_id = azurerm_public_ip.tg_public_ip.id
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.tg_public_ip ]
-}
-
-resource "azurerm_network_interface" "dut1_mng" {
- name = "dut1_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "dut1_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.11"
- public_ip_address_id = azurerm_public_ip.dut1_public_ip.id
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.dut1_public_ip ]
-}
-
-resource "azurerm_network_interface" "dut2_mng" {
- name = "dut2_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "dut2_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.12"
- public_ip_address_id = azurerm_public_ip.dut2_public_ip.id
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.dut2_public_ip ]
-}
-
-resource "azurerm_route_table" "b" {
- name = "b"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.b ]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.tg_if1.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address
- }
- route {
- name = "tg2"
- address_prefix = var.vpc_cidr_d
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address
- }
-}
-
-resource "azurerm_route_table" "c" {
- name = "c"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.c ]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "route-100"
- address_prefix = "100.0.0.0/8"
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address
- }
- route {
- name = "tg1"
- address_prefix = var.vpc_cidr_b
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "tg2"
- address_prefix = var.vpc_cidr_d
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address
- }
-}
-
-resource "azurerm_route_table" "d" {
- name = "d"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_subnet.d ]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.tg_if2.private_ip_address
- }
- route {
- name = "tg1"
- address_prefix = var.vpc_cidr_b
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address
- }
-}
-
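-# Bind each route table to its data-plane subnet; without these
-# associations the user-defined routes above have no effect.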
-resource "azurerm_subnet_route_table_association" "b" {
- subnet_id = azurerm_subnet.b.id
- route_table_id = azurerm_route_table.b.id
-}
-
-resource "azurerm_subnet_route_table_association" "c" {
- subnet_id = azurerm_subnet.c.id
- route_table_id = azurerm_route_table.c.id
-}
-
-resource "azurerm_subnet_route_table_association" "d" {
- subnet_id = azurerm_subnet.d.id
- route_table_id = azurerm_route_table.d.id
-}
-
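-# Create the virtual machines. The first entry in network_interface_ids
-# is the management NIC and must match primary_network_interface_id. The
-# admin SSH public key is assumed to be present at ~/.ssh/id_rsa.pub.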
-resource "azurerm_virtual_machine" "tg" {
- name = "tg"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.tg_mng.id
- network_interface_ids = [ azurerm_network_interface.tg_mng.id,
- azurerm_network_interface.tg_if1.id,
- azurerm_network_interface.tg_if2.id ]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskTG"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "tg"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_network_interface.tg_mng ]
-}
-
-resource "azurerm_virtual_machine" "dut1" {
- name = "dut1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.dut1_mng.id
- network_interface_ids = [ azurerm_network_interface.dut1_mng.id,
- azurerm_network_interface.dut1_if1.id,
- azurerm_network_interface.dut1_if2.id ]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskDUT1"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "dut1"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_network_interface.dut1_mng ]
-}
-
-resource "azurerm_virtual_machine" "dut2" {
- name = "dut2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.dut2_mng.id
- network_interface_ids = [ azurerm_network_interface.dut2_mng.id,
- azurerm_network_interface.dut2_if1.id,
- azurerm_network_interface.dut2_if2.id ]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskDUT2"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "dut2"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [ azurerm_resource_group.CSIT,
- azurerm_network_interface.dut2_mng ]
-}
-
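-# The public IPs use "Dynamic" allocation, so an address exists only once
-# a VM is running; these data sources read the allocated addresses back
-# (depends_on makes them wait for the corresponding VM).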
-data "azurerm_public_ip" "tg_public_ip" {
- name = "tg_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.tg ]
-}
-
-data "azurerm_public_ip" "dut1_public_ip" {
- name = "dut1_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut1 ]
-}
-
-data "azurerm_public_ip" "dut2_public_ip" {
- name = "dut2_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut2 ]
-}
-
-# Provisioning
-
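-# Note: the "ansible" provisioner is not built into Terraform; it is
-# assumed to be provided by the third-party terraform-provisioner-ansible
-# plugin, which must be installed locally before "terraform init/apply".
-# It connects over SSH with the private key at ~/.ssh/id_rsa and runs the
-# referenced playbooks against the freshly created hosts.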
-resource "null_resource" "deploy_tg" {
- depends_on = [ azurerm_virtual_machine.tg,
- azurerm_network_interface.tg_if1,
- azurerm_network_interface.tg_if2 ]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.tg_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_azure.yaml"
- force_handlers = true
- }
- hosts = ["tg"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_dut1" {
- depends_on = [ azurerm_virtual_machine.dut1,
- azurerm_network_interface.dut1_if1,
- azurerm_network_interface.dut1_if2 ]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.dut1_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_azure.yaml"
- force_handlers = true
- }
- hosts = ["sut"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_dut2" {
- depends_on = [ azurerm_virtual_machine.dut2,
- azurerm_network_interface.dut2_if1,
- azurerm_network_interface.dut2_if2 ]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.dut2_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site_azure.yaml"
- force_handlers = true
- }
- hosts = ["sut"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_topology" {
- depends_on = [ azurerm_virtual_machine.tg,
- azurerm_network_interface.tg_if1,
- azurerm_network_interface.tg_if2,
- azurerm_virtual_machine.dut1,
- azurerm_network_interface.dut1_if1,
- azurerm_network_interface.dut1_if2,
- azurerm_virtual_machine.dut2,
- azurerm_network_interface.dut2_if1,
- azurerm_network_interface.dut2_if2 ]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- cloud_topology = "3n_azure_Fsv2"
- tg_if1_mac = data.azurerm_network_interface.tg_if1.mac_address
- tg_if2_mac = data.azurerm_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.azurerm_network_interface.dut1_if1.mac_address
- dut1_if2_mac = data.azurerm_network_interface.dut1_if2.mac_address
- dut2_if1_mac = data.azurerm_network_interface.dut2_if1.mac_address
- dut2_if2_mac = data.azurerm_network_interface.dut2_if2.mac_address
- tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address
- dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address
- dut2_public_ip = data.azurerm_public_ip.dut2_public_ip.ip_address
- }
- }
- }
-}
-
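-# Debug outputs: echo the allocated public addresses after apply.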
-output "dbg_tg" {
- value = "TG IP: ${data.azurerm_public_ip.tg_public_ip.ip_address}"
-}
-
-output "dbg_dut1" {
- value = "DUT1 IP: ${data.azurerm_public_ip.dut1_public_ip.ip_address}"
-}
-
-output "dbg_dut2" {
- value = "DUT2 IP: ${data.azurerm_public_ip.dut2_public_ip.ip_address}"
-}
diff --git a/resources/tools/terraform/3n_azure_fsv2/nic.tf b/resources/tools/terraform/3n_azure_fsv2/nic.tf
deleted file mode 100644
index 51692593c6..0000000000
--- a/resources/tools/terraform/3n_azure_fsv2/nic.tf
+++ /dev/null
@@ -1,133 +0,0 @@
-# Create the network interfaces for the data-plane traffic
-
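-# All data-plane NICs enable IP forwarding, so a NIC may accept traffic
-# not addressed to it (required by the "VirtualAppliance" routes in
-# main.tf), and accelerated networking (SR-IOV) for higher throughput.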
-resource "azurerm_network_interface" "dut1_if2" {
- name = "dut1_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "dut1_if2"
- subnet_id = azurerm_subnet.c.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.200.101"
- }
-}
-
-data "azurerm_network_interface" "dut1_if2" {
- name = "dut1_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut1 ]
-}
-
-resource "azurerm_network_interface" "dut2_if1" {
- name = "dut2_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "dut2_if1"
- subnet_id = azurerm_subnet.c.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.200.102"
- }
-}
-
-data "azurerm_network_interface" "dut2_if1" {
- name = "dut2_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut2 ]
-}
-
-resource "azurerm_network_interface" "dut1_if1" {
- name = "dut1_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "dut1_if1"
- subnet_id = azurerm_subnet.b.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.10.11"
- }
-}
-
-data "azurerm_network_interface" "dut1_if1" {
- name = "dut1_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut1 ]
-}
-
-resource "azurerm_network_interface" "dut2_if2" {
- name = "dut2_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "dut2_if2"
- subnet_id = azurerm_subnet.d.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.20.11"
- }
-}
-
-data "azurerm_network_interface" "dut2_if2" {
- name = "dut2_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.dut2 ]
-}
-
-resource "azurerm_network_interface" "tg_if1" {
- name = "tg_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "tg1"
- subnet_id = azurerm_subnet.b.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.10.250"
- }
-}
-
-data "azurerm_network_interface" "tg_if1" {
- name = "tg_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.tg ]
-}
-
-resource "azurerm_network_interface" "tg_if2" {
- name = "tg_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = true
- enable_accelerated_networking = true
-
- ip_configuration {
- name = "tg2"
- subnet_id = azurerm_subnet.d.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.20.250"
- }
-}
-
-data "azurerm_network_interface" "tg_if2" {
- name = "tg_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [ azurerm_virtual_machine.tg ]
-}