aboutsummaryrefslogtreecommitdiffstats
path: root/fdio.infra.terraform
diff options
context:
space:
mode:
Diffstat (limited to 'fdio.infra.terraform')
-rw-r--r--fdio.infra.terraform/1n_nmd/main.tf60
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl73
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl223
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/main.tf82
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/outputs.tf4
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/providers.tf13
-rw-r--r--fdio.infra.terraform/1n_nmd/minio/variables.tf170
-rw-r--r--fdio.infra.terraform/1n_nmd/versions.tf21
-rw-r--r--fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl169
-rw-r--r--fdio.infra.terraform/1n_nmd/vpp_device/main.tf21
-rw-r--r--fdio.infra.terraform/1n_nmd/vpp_device/versions.tf13
-rw-r--r--fdio.infra.terraform/3n_azure_fsv2/main.tf593
-rw-r--r--fdio.infra.terraform/3n_azure_fsv2/nic.tf133
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/hosts.tftpl5
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/main.tf68
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/providers.tf2
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/topology-1n-aws-c5n.tftpl30
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/variables.tf16
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-aws-c5n/versions.tf10
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/hosts.tftpl5
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/main.tf206
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/output.tf (renamed from fdio.infra.terraform/terraform-aws-2n-aws-c6gn/output.tf)0
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/topology-1n-c6in.tftpl30
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/variables.tf120
-rw-r--r--fdio.infra.terraform/terraform-aws-1n-c6in/versions.tf (renamed from fdio.infra.terraform/terraform-aws-2n-aws-c6gn/versions.tf)10
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/hosts.tftpl8
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/main.tf81
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/providers.tf2
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/topology-2n-aws-c5n.tftpl51
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/variables.tf18
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-aws-c5n/versions.tf12
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/hosts.tftpl8
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/main.tf (renamed from fdio.infra.terraform/terraform-aws-2n-aws-c6gn/main.tf)83
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/providers.tf (renamed from fdio.infra.terraform/terraform-aws-2n-aws-c6gn/providers.tf)0
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/topology-2n-c6gn.tftpl51
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/variables.tf (renamed from fdio.infra.terraform/terraform-aws-2n-aws-c6gn/variables.tf)8
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6gn/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/hosts.tftpl8
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/main.tf335
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/topology-2n-c6in.tftpl51
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/variables.tf180
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c6in/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/hosts.tftpl8
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/main.tf335
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/topology-2n-c7gn.tftpl51
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/variables.tf168
-rw-r--r--fdio.infra.terraform/terraform-aws-2n-c7gn/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/hosts.tftpl9
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/main.tf93
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/providers.tf2
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/topology-3n-aws-c5n.tftpl72
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/variables.tf22
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-aws-c5n/versions.tf12
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/hosts.tftpl9
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/main.tf457
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/topology-3n-c6gn.tftpl73
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/variables.tf216
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6gn/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/hosts.tftpl9
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/main.tf457
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/topology-3n-c6in.tftpl73
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/variables.tf216
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c6in/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/hosts.tftpl9
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/main.tf457
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/output.tf0
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/providers.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/topology-3n-c7gn.tftpl72
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/variables.tf216
-rw-r--r--fdio.infra.terraform/terraform-aws-3n-c7gn/versions.tf20
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/main.tf21
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/output.tf3
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/variables.tf22
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/versions.tf4
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-application/versions.tf4
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/main.tf224
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/variables.tf74
-rw-r--r--fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/versions.tf4
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/main.tf13
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/output.tf3
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/variables.tf32
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/versions.tf11
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/main.tf27
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/variables.tf4
-rw-r--r--fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/versions.tf10
-rw-r--r--fdio.infra.terraform/terraform-aws-subnet/example/versions.tf6
-rw-r--r--fdio.infra.terraform/terraform-aws-subnet/versions.tf6
-rw-r--r--fdio.infra.terraform/terraform-aws-vpc/example/versions.tf6
-rw-r--r--fdio.infra.terraform/terraform-aws-vpc/versions.tf6
-rw-r--r--fdio.infra.terraform/terraform-nomad-loki/README.md52
-rw-r--r--fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl261
-rw-r--r--fdio.infra.terraform/terraform-nomad-loki/main.tf40
-rw-r--r--fdio.infra.terraform/terraform-nomad-loki/variables.tf127
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/conf/nomad/prometheus.hcl.tftpl262
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/fdio/main.tf2
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/fdio/versions.tf8
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/main.tf8
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/variables.tf14
-rw-r--r--fdio.infra.terraform/terraform-nomad-prometheus/versions.tf4
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/README.md10
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-device-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-hoststack-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-mrr-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-ndrpdr-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-reconf-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-soak-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-hoststack-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-mrr-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-ndrpdr-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-reconf-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-soak-rls2402.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-stats.hcl.tftpl53
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-hoststack.hcl.tftpl53
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-mrr.hcl.tftpl53
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-ndrpdr.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-soak.hcl.tftpl55
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl.hcl.tftpl322
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/main.tf264
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/variables.tf4
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/versions.tf8
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/main.tf4
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/variables.tf4
-rw-r--r--fdio.infra.terraform/terraform-nomad-pyspark-etl/versions.tf4
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl78
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf16
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf (renamed from fdio.infra.terraform/1n_nmd/providers.tf)6
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf (renamed from fdio.infra.terraform/1n_nmd/variables.tf)20
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf15
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/main.tf20
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf (renamed from fdio.infra.terraform/1n_nmd/vpp_device/variables.tf)16
-rw-r--r--fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf (renamed from fdio.infra.terraform/terraform-nomad-loki/versions.tf)4
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/README.md58
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/hosts.tftpl8
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/main.tf211
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/outputs.tf9
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/topology-2n.tftpl59
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/user-data-sut166
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/user-data-tg171
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/variables.tf14
-rw-r--r--fdio.infra.terraform/terraform-openstack-2n/versions.tf9
-rw-r--r--fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/variables.tf6
-rw-r--r--fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/versions.tf6
152 files changed, 6776 insertions, 2863 deletions
diff --git a/fdio.infra.terraform/1n_nmd/main.tf b/fdio.infra.terraform/1n_nmd/main.tf
deleted file mode 100644
index 77701835aa..0000000000
--- a/fdio.infra.terraform/1n_nmd/main.tf
+++ /dev/null
@@ -1,60 +0,0 @@
-# For convenience in simple configurations, a child module automatically
-# inherits default (un-aliased) provider configurations from its parent.
-# This means that explicit provider blocks appear only in the root module,
-# and downstream modules can simply declare resources for that provider
-# and have them automatically associated with the root provider
-# configurations.
-
-#module "minio" {
-# source = "./minio"
-# providers = {
-# nomad = nomad.yul1
-# }
-#
-# # nomad
-# nomad_datacenters = ["yul1"]
-# nomad_host_volume = "prod-volume-data1-1"
-#
-# # minio
-# minio_job_name = "prod-minio"
-# minio_group_count = 4
-# minio_service_name = "storage"
-# minio_host = "http://10.32.8.1{4...7}"
-# minio_port = 9000
-# minio_container_image = "minio/minio:RELEASE.2021-07-27T02-40-15Z"
-# minio_vault_secret = {
-# use_vault_provider = false,
-# vault_kv_policy_name = "kv-secret",
-# vault_kv_path = "secret/data/minio",
-# vault_kv_field_access_key = "access_key",
-# vault_kv_field_secret_key = "secret_key"
-# }
-# minio_data_dir = "/data/"
-# minio_use_host_volume = true
-# minio_use_canary = true
-# minio_envs = ["MINIO_BROWSER=\"off\""]
-#
-# minio_buckets = ["logs.fd.io"]
-#}
-
-data "vault_generic_secret" "minio_creds" {
- path = "kv/secret/data/minio"
-}
-
-module "vpp_device" {
- source = "./vpp_device"
- providers = {
- nomad = nomad.yul1
- }
-
- # nomad
- nomad_datacenters = ["yul1"]
-
- # csit_shim
- csit_shim_job_name = "prod-device-csit-shim"
- csit_shim_group_count = "1"
- csit_shim_cpu = "1500"
- csit_shim_mem = "4096"
- csit_shim_image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
- csit_shim_image_x86_64 = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
-}
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
deleted file mode 100644
index 238003bb00..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
+++ /dev/null
@@ -1,73 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "batch"
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-mc" {
- task "prod-task1-create-buckets" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- entrypoint = [
- "/bin/sh",
- "-c",
- "${command}"
- ]
- dns_servers = [ "$${attr.unique.network.ip-address}" ]
- privileged = false
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
- %{ if use_vault_provider }
- {{ with secret "${vault_kv_path}" }}
- MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
- MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
- {{ end }}
- %{ else }
- MINIO_ACCESS_KEY = "${access_key}"
- MINIO_SECRET_KEY = "${secret_key}"
- %{ endif }
- ${ envs }
- }
- }
- }
-}
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
deleted file mode 100644
index 3889b51a9f..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
+++ /dev/null
@@ -1,223 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = 1
-
- health_check = "checks"
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
-%{ if use_canary }
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = 1
-
- # Specifies if the job should auto-promote to the canary version when all
- # canaries become healthy during a deployment. Defaults to false which means
- # canaries must be manually updated with the nomad deployment promote
- # command.
- auto_promote = true
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = true
-%{ endif }
- }
-
- # All groups in this job should be scheduled on different hosts.
- constraint {
- operator = "distinct_hosts"
- value = "true"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "prod-group1-minio" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- # https://www.nomadproject.io/docs/job-specification/volume
- %{ if use_host_volume }
- volume "prod-volume1-minio" {
- type = "host"
- read_only = false
- source = "${host_volume}"
- }
- %{ endif }
-
- # The restart stanza configures a tasks's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-minio" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- %{ if use_host_volume }
- volume_mount {
- volume = "prod-volume1-minio"
- destination = "${data_dir}"
- read_only = false
- }
- %{ endif }
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- dns_servers = [ "172.17.0.1" ]
- network_mode = "host"
- command = "server"
- args = [ "${host}:${port}${data_dir}" ]
- port_map {
- http = ${port}
- }
- privileged = false
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
-%{ if use_vault_provider }
-{{ with secret "${vault_kv_path}" }}
- MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
- MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
-{{ end }}
-%{ else }
- MINIO_ACCESS_KEY = "${access_key}"
- MINIO_SECRET_KEY = "${secret_key}"
-%{ endif }
- ${ envs }
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "${service_name}"
- port = "http"
- tags = [ "storage$${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "Min.io Server HTTP Check Live"
- type = "http"
- port = "http"
- protocol = "http"
- method = "GET"
- path = "/minio/health/live"
- interval = "10s"
- timeout = "2s"
- }
- check {
- name = "Min.io Server HTTP Check Ready"
- type = "http"
- port = "http"
- protocol = "http"
- method = "GET"
- path = "/minio/health/ready"
- interval = "10s"
- timeout = "2s"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- # The network stanza specifies the networking requirements for the task
- # group, including the network mode and port allocations. When scheduling
- # jobs in Nomad they are provisioned across your fleet of machines along
- # with other jobs and services. Because you don't know in advance what host
- # your job will be provisioned on, Nomad will provide your tasks with
- # network configuration when they start up.
- #
- # https://www.nomadproject.io/docs/job-specification/network
- #
- network {
- port "http" {
- static = ${port}
- }
- }
- }
- }
- }
-}
diff --git a/fdio.infra.terraform/1n_nmd/minio/main.tf b/fdio.infra.terraform/1n_nmd/minio/main.tf
deleted file mode 100644
index 6954cc2f05..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/main.tf
+++ /dev/null
@@ -1,82 +0,0 @@
-locals {
- datacenters = join(",", var.nomad_datacenters)
- minio_env_vars = join("\n",
- concat([
- ], var.minio_envs)
- )
- mc_env_vars = join("\n",
- concat([
- ], var.mc_envs)
- )
- mc_formatted_bucket_list = formatlist("LOCALMINIO/%s", var.minio_buckets)
- mc_add_config_command = concat(
- [
- "mc",
- "config",
- "host",
- "add",
- "LOCALMINIO",
- "http://${var.minio_service_name}.service.consul:${var.minio_port}",
- "$MINIO_ACCESS_KEY",
- "$MINIO_SECRET_KEY",
- ])
- mc_create_bucket_command = concat(["mc", "mb", "-p"], local.mc_formatted_bucket_list)
- command = join(" ", concat(local.mc_add_config_command, ["&&"], local.mc_create_bucket_command, [";"], concat(var.mc_extra_commands)))
-}
-
-data "template_file" "nomad_job_minio" {
- template = file("${path.module}/conf/nomad/minio.hcl")
- vars = {
- job_name = var.minio_job_name
- datacenters = local.datacenters
- use_canary = var.minio_use_canary
- group_count = var.minio_group_count
- use_host_volume = var.minio_use_host_volume
- host_volume = var.nomad_host_volume
- service_name = var.minio_service_name
- host = var.minio_host
- port = var.minio_port
- upstreams = jsonencode(var.minio_upstreams)
- cpu_proxy = var.minio_resource_proxy.cpu
- memory_proxy = var.minio_resource_proxy.memory
- use_vault_provider = var.minio_vault_secret.use_vault_provider
- image = var.minio_container_image
- access_key = var.minio_access_key
- secret_key = var.minio_secret_key
- data_dir = var.minio_data_dir
- envs = local.minio_env_vars
- cpu = var.minio_cpu
- memory = var.minio_memory
- }
-}
-
-data "template_file" "nomad_job_mc" {
- template = file("${path.module}/conf/nomad/mc.hcl")
- vars = {
- job_name = var.mc_job_name
- service_name = var.mc_service_name
- datacenters = local.datacenters
- minio_service_name = var.minio_service_name
- minio_port = var.minio_port
- image = var.mc_container_image
- access_key = var.minio_access_key
- secret_key = var.minio_secret_key
- use_vault_provider = var.minio_vault_secret.use_vault_provider
- envs = local.mc_env_vars
- command = local.command
- }
-}
-
-resource "nomad_job" "nomad_job_minio" {
- jobspec = data.template_file.nomad_job_minio.rendered
- detach = false
-}
-
-#resource "nomad_job" "nomad_job_mc" {
-# jobspec = data.template_file.nomad_job_mc.rendered
-# detach = false
-#
-# depends_on = [
-# nomad_job.nomad_job_minio
-# ]
-#} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/outputs.tf b/fdio.infra.terraform/1n_nmd/minio/outputs.tf
deleted file mode 100644
index 309cd3b9d0..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/outputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-output "minio_service_name" {
- description = "Minio service name"
- value = data.template_file.nomad_job_minio.vars.service_name
-} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/providers.tf b/fdio.infra.terraform/1n_nmd/minio/providers.tf
deleted file mode 100644
index 1399201d21..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/providers.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-terraform {
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.9"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.1.2"
- }
- }
- required_version = ">= 1.0.3"
-}
diff --git a/fdio.infra.terraform/1n_nmd/minio/variables.tf b/fdio.infra.terraform/1n_nmd/minio/variables.tf
deleted file mode 100644
index ab9d07f0d7..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/variables.tf
+++ /dev/null
@@ -1,170 +0,0 @@
-# Nomad
-variable "nomad_datacenters" {
- description = "Nomad data centers"
- type = list(string)
- default = ["dc1"]
-}
-
-variable "nomad_host_volume" {
- description = "Nomad Host Volume"
- type = string
- default = "persistence"
-}
-
-# Minio
-variable "minio_job_name" {
- description = "Minio job name"
- type = string
- default = "minio"
-}
-
-variable "minio_service_name" {
- description = "Minio service name"
- type = string
- default = "minio"
-}
-
-variable "minio_group_count" {
- description = "Number of Minio group instances"
- type = number
- default = 1
-}
-
-variable "minio_host" {
- description = "Minio host"
- type = string
- default = "127.0.0.1"
-}
-
-variable "minio_port" {
- description = "Minio port"
- type = number
- default = 9000
-}
-
-variable "minio_cpu" {
- description = "CPU allocation for Minio"
- type = number
- default = 40000
-}
-
-variable "minio_memory" {
- description = "Memory allocation for Minio"
- type = number
- default = 40000
-}
-
-variable "minio_container_image" {
- description = "Minio docker image"
- type = string
- default = "minio/minio:latest"
-}
-
-variable "minio_envs" {
- description = "Minio environment variables"
- type = list(string)
- default = []
-}
-
-variable "minio_access_key" {
- description = "Minio access key"
- type = string
- default = "minio"
-}
-
-variable "minio_secret_key" {
- description = "Minio secret key"
- type = string
- default = "minio123"
-}
-
-variable "minio_data_dir" {
- description = "Minio server data dir"
- type = string
- default = "/data/"
-}
-
-variable "minio_use_host_volume" {
- description = "Use Nomad host volume feature"
- type = bool
- default = false
-}
-
-variable "minio_use_canary" {
- description = "Uses canary deployment for Minio"
- type = bool
- default = false
-}
-
-variable "minio_vault_secret" {
- description = "Set of properties to be able to fetch secret from vault"
- type = object({
- use_vault_provider = bool,
- vault_kv_policy_name = string,
- vault_kv_path = string,
- vault_kv_field_access_key = string,
- vault_kv_field_secret_key = string
- })
-}
-
-variable "minio_resource_proxy" {
- description = "Minio proxy resources"
- type = object({
- cpu = number,
- memory = number
- })
- default = {
- cpu = 200,
- memory = 128
- }
- validation {
- condition = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128
- error_message = "Proxy resource must be at least: cpu=200, memory=128."
- }
-}
-
-# MC
-variable "mc_job_name" {
- description = "Minio client job name"
- type = string
- default = "mc"
-}
-
-variable "mc_service_name" {
- description = "Minio client service name"
- type = string
- default = "mc"
-}
-
-variable "mc_container_image" {
- description = "Minio client docker image"
- type = string
- default = "minio/mc:latest"
-}
-
-variable "mc_envs" {
- description = "Minio client environment variables"
- type = list(string)
- default = []
-}
-
-variable "minio_buckets" {
- description = "List of buckets to create on startup"
- type = list(string)
- default = []
-}
-
-variable "minio_upstreams" {
- description = "List of upstream services (list of object with service_name, port)"
- type = list(object({
- service_name = string,
- port = number,
- }))
- default = []
-}
-
-variable "mc_extra_commands" {
- description = "Extra commands to run in MC container after creating buckets"
- type = list(string)
- default = [""]
-} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/versions.tf b/fdio.infra.terraform/1n_nmd/versions.tf
deleted file mode 100644
index 556ddbaee4..0000000000
--- a/fdio.infra.terraform/1n_nmd/versions.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-terraform {
- backend "consul" {
- address = "consul.service.consul:8500"
- scheme = "http"
- path = "terraform/nomad"
- }
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.9"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.2.0"
- }
- vault = {
- version = ">=2.14.0"
- }
- }
- required_version = ">= 1.0.3"
-} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl b/fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl
deleted file mode 100644
index 9763088dcd..0000000000
--- a/fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl
+++ /dev/null
@@ -1,169 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "system"
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-csit-shim-amd" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- constraint {
- attribute = "$${node.class}"
- value = "csit"
- }
-
- restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-csit-shim-amd" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image_x86_64}"
- network_mode = "host"
- pid_mode = "host"
- volumes = [
- "/var/run/docker.sock:/var/run/docker.sock"
- ]
- privileged = true
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = ${cpu}
- memory = ${mem}
- network {
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
- }
- }
- }
-
- group "prod-group1-csit-shim-arm" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- constraint {
- attribute = "$${node.class}"
- value = "csitarm"
- }
-
- restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-csit-shim-arm" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image_aarch64}"
- network_mode = "host"
- pid_mode = "host"
- volumes = [
- "/var/run/docker.sock:/var/run/docker.sock"
- ]
- privileged = true
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
- #
- resources {
- cpu = ${cpu}
- memory = ${mem}
- network {
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
- }
- }
- }
-} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf b/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
deleted file mode 100644
index 89b28ce385..0000000000
--- a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-locals {
- datacenters = join(",", var.nomad_datacenters)
-}
-
-data "template_file" "nomad_job_csit_shim" {
- template = file("${path.module}/conf/nomad/csit_shim.hcl")
- vars = {
- datacenters = local.datacenters
- job_name = var.csit_shim_job_name
- group_count = var.csit_shim_group_count
- cpu = var.csit_shim_cpu
- mem = var.csit_shim_mem
- image_aarch64 = var.csit_shim_image_aarch64
- image_x86_64 = var.csit_shim_image_x86_64
- }
-}
-
-resource "nomad_job" "nomad_job_csit_shim" {
- jobspec = data.template_file.nomad_job_csit_shim.rendered
- detach = false
-} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf b/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf
deleted file mode 100644
index b80610a525..0000000000
--- a/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-terraform {
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.15"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.2.0"
- }
- }
- required_version = ">= 1.0.3"
-}
diff --git a/fdio.infra.terraform/3n_azure_fsv2/main.tf b/fdio.infra.terraform/3n_azure_fsv2/main.tf
deleted file mode 100644
index f84f521ecd..0000000000
--- a/fdio.infra.terraform/3n_azure_fsv2/main.tf
+++ /dev/null
@@ -1,593 +0,0 @@
-provider "azurerm" {
- version = ">= 1.4.0"
-}
-
-# Variables
-
-variable "vpc_addr_space_a" {
- type = string
- default = "172.16.0.0/16"
-}
-
-variable "vpc_cidr_a" {
- type = string
- default = "172.16.0.0/24"
-}
-
-variable "vpc_cidr_b" {
- type = string
- default = "172.16.10.0/24"
-}
-
-variable "vpc_cidr_c" {
- type = string
- default = "172.16.200.0/24"
-}
-
-variable "vpc_cidr_d" {
- type = string
- default = "172.16.20.0/24"
-}
-
-variable "trex_dummy_cidr_port_0" {
- type = string
- default = "172.16.11.0/24"
-}
-
-variable "trex_dummy_cidr_port_1" {
- type = string
- default = "172.16.21.0/24"
-}
-
-# Create resource group and resources
-
-resource "azurerm_resource_group" "CSIT" {
- name = "CSIT"
- #location = "East US"
- location = "UK South"
-}
-
-resource "azurerm_virtual_network" "CSIT" {
- name = "CSIT-network"
- resource_group_name = azurerm_resource_group.CSIT.name
- location = azurerm_resource_group.CSIT.location
- address_space = [var.vpc_addr_space_a]
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_subnet" "a" {
- name = "subnet_a"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_a
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_subnet" "b" {
- name = "subnet_b"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_b
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_subnet" "c" {
- name = "subnet_c"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_c
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_subnet" "d" {
- name = "subnet_d"
- resource_group_name = azurerm_resource_group.CSIT.name
- virtual_network_name = azurerm_virtual_network.CSIT.name
- address_prefix = var.vpc_cidr_d
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-# Create a security group of the Kiknos instances
-
-resource "azurerm_network_security_group" "CSIT" {
- name = "CSIT"
- resource_group_name = azurerm_resource_group.CSIT.name
- location = azurerm_resource_group.CSIT.location
- security_rule {
- name = "IpSec"
- priority = 100
- direction = "Inbound"
- access = "Allow"
- protocol = "Udp"
- source_port_range = "*"
- destination_port_range = "500"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "IpSec-NAT"
- priority = 101
- direction = "Inbound"
- access = "Allow"
- protocol = "Udp"
- source_port_range = "*"
- destination_port_range = "4500"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "SSH"
- priority = 102
- direction = "Inbound"
- access = "Allow"
- protocol = "Tcp"
- source_port_range = "*"
- destination_port_range = "22"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "InboundAll"
- priority = 103
- direction = "Inbound"
- access = "Allow"
- protocol = "*"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- security_rule {
- name = "Outbound"
- priority = 104
- direction = "Outbound"
- access = "Allow"
- protocol = "*"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
- depends_on = [azurerm_virtual_network.CSIT]
-}
-
-# Create public IPs
-
-resource "azurerm_public_ip" "tg_public_ip" {
- name = "tg_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_public_ip" "dut1_public_ip" {
- name = "dut1_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-resource "azurerm_public_ip" "dut2_public_ip" {
- name = "dut2_public_ip"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- allocation_method = "Dynamic"
- depends_on = [azurerm_resource_group.CSIT]
-}
-
-# Create network interface
-
-resource "azurerm_network_interface" "tg_mng" {
- name = "tg_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "tg_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.10"
- public_ip_address_id = azurerm_public_ip.tg_public_ip.id
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.tg_public_ip]
-}
-
-resource "azurerm_network_interface" "dut1_mng" {
- name = "dut1_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "dut1_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.11"
- public_ip_address_id = azurerm_public_ip.dut1_public_ip.id
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.dut1_public_ip]
-}
-
-resource "azurerm_network_interface" "dut2_mng" {
- name = "dut2_mng"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- ip_configuration {
- primary = "true"
- name = "dut2_mng_ip"
- subnet_id = azurerm_subnet.a.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.0.12"
- public_ip_address_id = azurerm_public_ip.dut2_public_ip.id
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.a,
- azurerm_public_ip.dut2_public_ip]
-}
-
-resource "azurerm_route_table" "b" {
- name = "b"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.b]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.tg_if1.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address
- }
- route {
- name = "tg2"
- address_prefix = var.vpc_cidr_d
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address
- }
-}
-
-resource "azurerm_route_table" "c" {
- name = "c"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.c]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "route-100"
- address_prefix = "100.0.0.0/8"
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address
- }
- route {
- name = "tg1"
- address_prefix = var.vpc_cidr_b
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address
- }
- route {
- name = "tg2"
- address_prefix = var.vpc_cidr_d
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address
- }
-}
-
-resource "azurerm_route_table" "d" {
- name = "d"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_subnet.d]
- disable_bgp_route_propagation = false
- route {
- name = "route-10"
- address_prefix = var.trex_dummy_cidr_port_0
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address
- }
- route {
- name = "route-20"
- address_prefix = var.trex_dummy_cidr_port_1
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.tg_if2.private_ip_address
- }
- route {
- name = "tg1"
- address_prefix = var.vpc_cidr_b
- next_hop_type = "VirtualAppliance"
- next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address
- }
-}
-
-resource "azurerm_subnet_route_table_association" "b" {
- subnet_id = azurerm_subnet.b.id
- route_table_id = azurerm_route_table.b.id
-}
-
-resource "azurerm_subnet_route_table_association" "c" {
- subnet_id = azurerm_subnet.c.id
- route_table_id = azurerm_route_table.c.id
-}
-
-resource "azurerm_subnet_route_table_association" "d" {
- subnet_id = azurerm_subnet.d.id
- route_table_id = azurerm_route_table.d.id
-}
-
-resource "azurerm_virtual_machine" "tg" {
- name = "tg"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.tg_mng.id
- network_interface_ids = [azurerm_network_interface.tg_mng.id,
- azurerm_network_interface.tg_if1.id,
- azurerm_network_interface.tg_if2.id]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskTG"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "tg"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_network_interface.tg_mng]
-}
-
-resource "azurerm_virtual_machine" "dut1" {
- name = "dut1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.dut1_mng.id
- network_interface_ids = [azurerm_network_interface.dut1_mng.id,
- azurerm_network_interface.dut1_if1.id,
- azurerm_network_interface.dut1_if2.id]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskDUT1"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "dut1"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_network_interface.dut1_mng]
-}
-
-resource "azurerm_virtual_machine" "dut2" {
- name = "dut2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- primary_network_interface_id = azurerm_network_interface.dut2_mng.id
- network_interface_ids = [azurerm_network_interface.dut2_mng.id,
- azurerm_network_interface.dut2_if1.id,
- azurerm_network_interface.dut2_if2.id]
- vm_size = "Standard_F32s_v2"
- delete_os_disk_on_termination = true
- delete_data_disks_on_termination = true
- storage_os_disk {
- name = "OsDiskDUT2"
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "StandardSSD_LRS"
- }
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "18.04-LTS"
- version = "latest"
- }
- os_profile {
- computer_name = "dut2"
- admin_username = "ubuntu"
- }
- os_profile_linux_config {
- disable_password_authentication = true
- ssh_keys {
- path = "/home/ubuntu/.ssh/authorized_keys"
- key_data = file("~/.ssh/id_rsa.pub")
- }
- }
- depends_on = [azurerm_resource_group.CSIT,
- azurerm_network_interface.dut2_mng]
-}
-
-data "azurerm_public_ip" "tg_public_ip" {
- name = "tg_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.tg]
-}
-
-data "azurerm_public_ip" "dut1_public_ip" {
- name = "dut1_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut1]
-}
-
-data "azurerm_public_ip" "dut2_public_ip" {
- name = "dut2_public_ip"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut2]
-}
-
-# Provisioning
-
-resource "null_resource" "deploy_tg" {
- depends_on = [azurerm_virtual_machine.tg,
- azurerm_network_interface.tg_if1,
- azurerm_network_interface.tg_if2]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.tg_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site.yaml"
- force_handlers = true
- }
- hosts = ["tg_azure"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_dut1" {
- depends_on = [azurerm_virtual_machine.dut1,
- azurerm_network_interface.dut1_if1,
- azurerm_network_interface.dut1_if2]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.dut1_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site.yaml"
- force_handlers = true
- }
- hosts = ["sut_azure"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_dut2" {
- depends_on = [azurerm_virtual_machine.dut2,
- azurerm_network_interface.dut2_if1,
- azurerm_network_interface.dut2_if2]
- connection {
- user = "ubuntu"
- host = data.azurerm_public_ip.dut2_public_ip.ip_address
- private_key = file("~/.ssh/id_rsa")
- }
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/site.yaml"
- force_handlers = true
- }
- hosts = ["sut_azure"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- azure = true
- }
- }
- }
-}
-
-resource "null_resource" "deploy_topology" {
- depends_on = [azurerm_virtual_machine.tg,
- azurerm_network_interface.tg_if1,
- azurerm_network_interface.tg_if2,
- azurerm_virtual_machine.dut1,
- azurerm_network_interface.dut1_if1,
- azurerm_network_interface.dut1_if2,
- azurerm_virtual_machine.dut2,
- azurerm_network_interface.dut2_if1,
- azurerm_network_interface.dut2_if2]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = "/usr/bin/python3"
- cloud_topology = "3n_azure_Fsv2"
- tg_if1_mac = data.azurerm_network_interface.tg_if1.mac_address
- tg_if2_mac = data.azurerm_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.azurerm_network_interface.dut1_if1.mac_address
- dut1_if2_mac = data.azurerm_network_interface.dut1_if2.mac_address
- dut2_if1_mac = data.azurerm_network_interface.dut2_if1.mac_address
- dut2_if2_mac = data.azurerm_network_interface.dut2_if2.mac_address
- tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address
- dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address
- dut2_public_ip = data.azurerm_public_ip.dut2_public_ip.ip_address
- }
- }
- }
-}
-
-output "dbg_tg" {
- value = "TG IP: ${data.azurerm_public_ip.tg_public_ip.ip_address}"
-}
-
-output "dbg_dut1" {
- value = "DUT1 IP: ${data.azurerm_public_ip.dut1_public_ip.ip_address}"
-}
-
-output "dbg_dut2" {
- value = "DUT2 IP: ${data.azurerm_public_ip.dut2_public_ip.ip_address}"
-}
diff --git a/fdio.infra.terraform/3n_azure_fsv2/nic.tf b/fdio.infra.terraform/3n_azure_fsv2/nic.tf
deleted file mode 100644
index 0bc9e900a7..0000000000
--- a/fdio.infra.terraform/3n_azure_fsv2/nic.tf
+++ /dev/null
@@ -1,133 +0,0 @@
-# Create a network interface for the data-plane traffic
-
-resource "azurerm_network_interface" "dut1_if2" {
- name = "dut1_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "dut1_if2"
- subnet_id = azurerm_subnet.c.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.200.101"
- }
-}
-
-data "azurerm_network_interface" "dut1_if2" {
- name = "dut1_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut1]
-}
-
-resource "azurerm_network_interface" "dut2_if1" {
- name = "dut2_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "dut2_if1"
- subnet_id = azurerm_subnet.c.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.200.102"
- }
-}
-
-data "azurerm_network_interface" "dut2_if1" {
- name = "dut2_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut2]
-}
-
-resource "azurerm_network_interface" "dut1_if1" {
- name = "dut1_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "dut1_if1"
- subnet_id = azurerm_subnet.b.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.10.11"
- }
-}
-
-data "azurerm_network_interface" "dut1_if1" {
- name = "dut1_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut1]
-}
-
-resource "azurerm_network_interface" "dut2_if2" {
- name = "dut2_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "dut2_if2"
- subnet_id = azurerm_subnet.d.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.20.11"
- }
-}
-
-data "azurerm_network_interface" "dut2_if2" {
- name = "dut2_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.dut2]
-}
-
-resource "azurerm_network_interface" "tg_if1" {
- name = "tg_if1"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "tg1"
- subnet_id = azurerm_subnet.b.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.10.250"
- }
-}
-
-data "azurerm_network_interface" "tg_if1" {
- name = "tg_if1"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.tg]
-}
-
-resource "azurerm_network_interface" "tg_if2" {
- name = "tg_if2"
- location = azurerm_resource_group.CSIT.location
- resource_group_name = azurerm_resource_group.CSIT.name
- network_security_group_id = azurerm_network_security_group.CSIT.id
- enable_ip_forwarding = "true"
- enable_accelerated_networking = "true"
-
- ip_configuration {
- name = "tg2"
- subnet_id = azurerm_subnet.d.id
- private_ip_address_allocation = "Static"
- private_ip_address = "172.16.20.250"
- }
-}
-
-data "azurerm_network_interface" "tg_if2" {
- name = "tg_if2"
- resource_group_name = azurerm_resource_group.CSIT.name
- depends_on = [azurerm_virtual_machine.tg]
-}
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/hosts.tftpl b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/hosts.tftpl
new file mode 100644
index 0000000000..58594efe9d
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/hosts.tftpl
@@ -0,0 +1,5 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/main.tf b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/main.tf
index c99d839bd2..e455b6b104 100644
--- a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/main.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/main.tf
@@ -38,19 +38,24 @@ module "subnet_b" {
}
# Create Private Key
-resource "tls_private_key" "private_key" {
- algorithm = var.private_key_algorithm
- ecdsa_curve = var.private_key_ecdsa_curve
- rsa_bits = var.private_key_rsa_bits
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
}
# Create Key Pair
-resource "aws_key_pair" "key_pair" {
- depends_on = [
- tls_private_key.private_key
- ]
- key_name = local.key_pair_key_name
- public_key = tls_private_key.private_key.public_key_openssh
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
}
# Create Placement Group
@@ -70,7 +75,7 @@ resource "aws_instance" "tg" {
associate_public_ip_address = var.tg_associate_public_ip_address
instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
instance_type = var.tg_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.tg_private_ip
source_dest_check = var.tg_source_dest_check
@@ -167,7 +172,7 @@ resource "null_resource" "deploy_tg" {
connection {
user = "ubuntu"
host = aws_instance.tg.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -175,27 +180,32 @@ resource "null_resource" "deploy_tg" {
}
}
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg
+ ]
-resource "null_resource" "deploy_topology" {
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
depends_on = [
aws_instance.tg
]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = var.ansible_topology_path
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = local.ansible_python_executable
- testbed_name = local.testbed_name
- cloud_topology = local.topology_name
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- public_ip_list = "${aws_instance.tg.public_ip}"
- }
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
}
- }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/providers.tf b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/providers.tf
index d0e7490d38..2482ca2839 100644
--- a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/providers.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/providers.tf
@@ -5,7 +5,7 @@ provider "aws" {
}
provider "vault" {
- address = "http://vault.service.consul:8200"
+ address = "http://10.30.51.24:8200"
skip_tls_verify = true
token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/topology-1n-aws-c5n.tftpl b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/topology-1n-aws-c5n.tftpl
new file mode 100644
index 0000000000..c99d1f5ebc
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/topology-1n-aws-c5n.tftpl
@@ -0,0 +1,30 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/1_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 1-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # tg_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link1
+ model: Amazon-Nitro-50G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/variables.tf b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/variables.tf
index 0eb6bcc769..566a073266 100644
--- a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/variables.tf
@@ -24,19 +24,7 @@ variable "testbed_name" {
variable "private_key_algorithm" {
description = "The name of the algorithm to use for the key."
type = string
- default = "RSA"
-}
-
-variable "private_key_ecdsa_curve" {
- description = "When algorithm is ECDSA, the name of the elliptic curve to use."
- type = string
- default = "P521"
-}
-
-variable "private_key_rsa_bits" {
- description = "When algorithm is RSA, the size of the generated RSA key in bits."
- type = number
- default = 4096
+ default = "ED25519"
}
# Variables for Placement Group
@@ -50,7 +38,7 @@ variable "placement_group_strategy" {
variable "tg_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-01d1d62914ef00b25"
+ default = "ami-07430bfa17fd4e597"
}
variable "tg_associate_public_ip_address" {
diff --git a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/versions.tf b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/versions.tf
index 0eead1fc01..7e17bb4924 100644
--- a/fdio.infra.terraform/terraform-aws-1n-aws-c5n/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-aws-c5n/versions.tf
@@ -2,19 +2,19 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.7.0"
}
null = {
source = "hashicorp/null"
- version = "~> 3.1.0"
+ version = ">= 3.2.1"
}
tls = {
source = "hashicorp/tls"
- version = "~> 3.1.0"
+ version = ">= 4.0.4"
}
vault = {
- version = ">=2.22.1"
+ version = ">= 3.15.2"
}
}
- required_version = ">= 1.0.4"
+ required_version = ">= 1.4.2"
}
diff --git a/fdio.infra.terraform/terraform-aws-1n-c6in/hosts.tftpl b/fdio.infra.terraform/terraform-aws-1n-c6in/hosts.tftpl
new file mode 100644
index 0000000000..58594efe9d
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/hosts.tftpl
@@ -0,0 +1,5 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-c6in/main.tf b/fdio.infra.terraform/terraform-aws-1n-c6in/main.tf
new file mode 100644
index 0000000000..94cac297c2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/main.tf
@@ -0,0 +1,206 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "eu-central-1b"
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "1n-c6in"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+
+resource "null_resource" "deploy_topology" {
+ depends_on = [
+ aws_instance.tg
+ ]
+
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = var.ansible_topology_path
+ }
+ hosts = ["local"]
+ extra_vars = {
+ ansible_python_interpreter = local.ansible_python_executable
+ testbed_name = local.testbed_name
+ cloud_topology = local.topology_name
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ public_ip_list = "${aws_instance.tg.public_ip}"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/output.tf b/fdio.infra.terraform/terraform-aws-1n-c6in/output.tf
index e69de29bb2..e69de29bb2 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/output.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-1n-c6in/providers.tf b/fdio.infra.terraform/terraform-aws-1n-c6in/providers.tf
new file mode 100644
index 0000000000..2482ca2839
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://10.30.51.24:8200"
+ skip_tls_verify = true
+ token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-c6in/topology-1n-c6in.tftpl b/fdio.infra.terraform/terraform-aws-1n-c6in/topology-1n-c6in.tftpl
new file mode 100644
index 0000000000..dc24577bbd
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/topology-1n-c6in.tftpl
@@ -0,0 +1,30 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/1_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 1-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-200G
+ port2:
+ # tg_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link1
+ model: Amazon-Nitro-200G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-1n-c6in/variables.tf b/fdio.infra.terraform/terraform-aws-1n-c6in/variables.tf
new file mode 100644
index 0000000000..db0cfa89da
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/variables.tf
@@ -0,0 +1,120 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-central-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-1n-c6in"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "ED25519"
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-07430bfa17fd4e597"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+ description = "Private IP address to assign to the first ENI (tg_if1)."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+ description = "Private IP address to assign to the second ENI (tg_if2)."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/16"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/16"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+ description = "List of shell commands executed over SSH to provision the instance on first run."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Null Resource
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/versions.tf b/fdio.infra.terraform/terraform-aws-1n-c6in/versions.tf
index 0eead1fc01..7e17bb4924 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-1n-c6in/versions.tf
@@ -2,19 +2,19 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.7.0"
}
null = {
source = "hashicorp/null"
- version = "~> 3.1.0"
+ version = ">= 3.2.1"
}
tls = {
source = "hashicorp/tls"
- version = "~> 3.1.0"
+ version = ">= 4.0.4"
}
vault = {
- version = ">=2.22.1"
+ version = ">= 3.15.2"
}
}
- required_version = ">= 1.0.4"
+ required_version = ">= 1.4.2"
}
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/hosts.tftpl b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/hosts.tftpl
new file mode 100644
index 0000000000..cb36dbb138
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/hosts.tftpl
@@ -0,0 +1,8 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/main.tf b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/main.tf
index 699992db98..a7abab1214 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/main.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/main.tf
@@ -49,19 +49,24 @@ module "subnet_d" {
}
# Create Private Key
-resource "tls_private_key" "private_key" {
- algorithm = var.private_key_algorithm
- ecdsa_curve = var.private_key_ecdsa_curve
- rsa_bits = var.private_key_rsa_bits
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
}
# Create Key Pair
-resource "aws_key_pair" "key_pair" {
- depends_on = [
- tls_private_key.private_key
- ]
- key_name = local.key_pair_key_name
- public_key = tls_private_key.private_key.public_key_openssh
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
}
# Create Placement Group
@@ -81,7 +86,7 @@ resource "aws_instance" "tg" {
associate_public_ip_address = var.tg_associate_public_ip_address
instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
instance_type = var.tg_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.tg_private_ip
source_dest_check = var.tg_source_dest_check
@@ -180,7 +185,7 @@ resource "aws_instance" "sut1" {
associate_public_ip_address = var.sut1_associate_public_ip_address
instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
instance_type = var.sut1_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.sut1_private_ip
source_dest_check = var.sut1_source_dest_check
@@ -264,7 +269,7 @@ resource "null_resource" "deploy_tg" {
connection {
user = "ubuntu"
host = aws_instance.tg.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -285,7 +290,7 @@ resource "null_resource" "deploy_sut1" {
connection {
user = "ubuntu"
host = aws_instance.sut1.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -293,30 +298,38 @@ resource "null_resource" "deploy_sut1" {
}
}
-resource "null_resource" "deploy_topology" {
+resource "local_file" "topology_file" {
depends_on = [
aws_instance.tg,
aws_instance.sut1
]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = var.ansible_topology_path
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = local.ansible_python_executable
- testbed_name = local.testbed_name
- cloud_topology = local.topology_name
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
- dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- dut1_public_ip = aws_instance.sut1.public_ip
- public_ip_list = "${aws_instance.tg.public_ip},${aws_instance.sut1.public_ip}"
- }
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
}
- }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/providers.tf b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/providers.tf
index d0e7490d38..2482ca2839 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/providers.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/providers.tf
@@ -5,7 +5,7 @@ provider "aws" {
}
provider "vault" {
- address = "http://vault.service.consul:8200"
+ address = "http://10.30.51.24:8200"
skip_tls_verify = true
token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/topology-2n-aws-c5n.tftpl b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/topology-2n-aws-c5n.tftpl
new file mode 100644
index 0000000000..313b4b3d86
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/topology-2n-aws-c5n.tftpl
@@ -0,0 +1,51 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # tg_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # dut1_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/variables.tf b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/variables.tf
index e2ec8aa70c..de113166e6 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/variables.tf
@@ -24,19 +24,7 @@ variable "testbed_name" {
variable "private_key_algorithm" {
description = "The name of the algorithm to use for the key."
type = string
- default = "RSA"
-}
-
-variable "private_key_ecdsa_curve" {
- description = "When algorithm is ECDSA, the name of the elliptic curve to use."
- type = string
- default = "P521"
-}
-
-variable "private_key_rsa_bits" {
- description = "When algorithm is RSA, the size of the generated RSA key in bits."
- type = number
- default = 4096
+ default = "ED25519"
}
# Variables for Placement Group
@@ -50,7 +38,7 @@ variable "placement_group_strategy" {
variable "tg_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-01d1d62914ef00b25"
+ default = "ami-07430bfa17fd4e597"
}
variable "tg_associate_public_ip_address" {
@@ -86,7 +74,7 @@ variable "tg_source_dest_check" {
variable "sut1_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-0bfdf32a014984d8a"
+ default = "ami-0a890555652963ec2"
}
variable "sut1_associate_public_ip_address" {
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/versions.tf b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/versions.tf
index 0eead1fc01..589699691e 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c5n/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-aws-c5n/versions.tf
@@ -2,19 +2,19 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.7.0"
}
null = {
source = "hashicorp/null"
- version = "~> 3.1.0"
+ version = ">= 3.2.1"
}
tls = {
source = "hashicorp/tls"
- version = "~> 3.1.0"
+ version = ">= 4.0.4"
}
vault = {
- version = ">=2.22.1"
+ version = ">= 3.15.2"
}
}
- required_version = ">= 1.0.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6gn/hosts.tftpl b/fdio.infra.terraform/terraform-aws-2n-c6gn/hosts.tftpl
new file mode 100644
index 0000000000..cb36dbb138
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/hosts.tftpl
@@ -0,0 +1,8 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/main.tf b/fdio.infra.terraform/terraform-aws-2n-c6gn/main.tf
index 1f190426ad..b3da06ba76 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/main.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/main.tf
@@ -12,7 +12,7 @@ locals {
placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
testbed_name = "testbed1"
- topology_name = "2n-aws-c6gn"
+ topology_name = "2n-c6gn"
tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
}
@@ -49,19 +49,24 @@ module "subnet_d" {
}
# Create Private Key
-resource "tls_private_key" "private_key" {
- algorithm = var.private_key_algorithm
- ecdsa_curve = var.private_key_ecdsa_curve
- rsa_bits = var.private_key_rsa_bits
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
}
# Create Key Pair
-resource "aws_key_pair" "key_pair" {
- depends_on = [
- tls_private_key.private_key
- ]
- key_name = local.key_pair_key_name
- public_key = tls_private_key.private_key.public_key_openssh
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
}
# Create Placement Group
@@ -81,7 +86,7 @@ resource "aws_instance" "tg" {
associate_public_ip_address = var.tg_associate_public_ip_address
instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
instance_type = var.tg_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.tg_private_ip
source_dest_check = var.tg_source_dest_check
@@ -180,7 +185,7 @@ resource "aws_instance" "sut1" {
associate_public_ip_address = var.sut1_associate_public_ip_address
instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
instance_type = var.sut1_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.sut1_private_ip
source_dest_check = var.sut1_source_dest_check
@@ -264,7 +269,7 @@ resource "null_resource" "deploy_tg" {
connection {
user = "ubuntu"
host = aws_instance.tg.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -285,7 +290,7 @@ resource "null_resource" "deploy_sut1" {
connection {
user = "ubuntu"
host = aws_instance.sut1.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -293,30 +298,38 @@ resource "null_resource" "deploy_sut1" {
}
}
-resource "null_resource" "deploy_topology" {
+resource "local_file" "topology_file" {
depends_on = [
aws_instance.tg,
aws_instance.sut1
]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = var.ansible_topology_path
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = local.ansible_python_executable
- testbed_name = local.testbed_name
- cloud_topology = local.topology_name
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
- dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- dut1_public_ip = aws_instance.sut1.public_ip
- public_ip_list = "${aws_instance.tg.public_ip},${aws_instance.sut1.public_ip}"
- }
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
}
- }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6gn/output.tf b/fdio.infra.terraform/terraform-aws-2n-c6gn/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/providers.tf b/fdio.infra.terraform/terraform-aws-2n-c6gn/providers.tf
index d0e7490d38..d0e7490d38 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/providers.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/providers.tf
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6gn/topology-2n-c6gn.tftpl b/fdio.infra.terraform/terraform-aws-2n-c6gn/topology-2n-c6gn.tftpl
new file mode 100644
index 0000000000..a0fa5fc191
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/topology-2n-c6gn.tftpl
@@ -0,0 +1,51 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # tg_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # dut1_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/variables.tf b/fdio.infra.terraform/terraform-aws-2n-c6gn/variables.tf
index 5ac9ad8735..fec833126d 100644
--- a/fdio.infra.terraform/terraform-aws-2n-aws-c6gn/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/variables.tf
@@ -11,7 +11,7 @@ variable "region" {
variable "resource_prefix" {
description = "Resources name prefix."
type = string
- default = "csit-2n-aws-c6gn"
+ default = "csit-2n-c6gn"
}
variable "testbed_name" {
@@ -50,7 +50,7 @@ variable "placement_group_strategy" {
variable "tg_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-0c2d02d48236a23dd"
+ default = "ami-07430bfa17fd4e597"
}
variable "tg_associate_public_ip_address" {
@@ -68,7 +68,7 @@ variable "tg_instance_initiated_shutdown_behavior" {
variable "tg_instance_type" {
description = "The instance type to use for the instance."
type = string
- default = "c6gn.4xlarge"
+ default = "c6in.4xlarge"
}
variable "tg_private_ip" {
@@ -86,7 +86,7 @@ variable "tg_source_dest_check" {
variable "sut1_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-05ea90e57d2df4368"
+ default = "ami-0cebabdc14ee56909"
}
variable "sut1_associate_public_ip_address" {
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6gn/versions.tf b/fdio.infra.terraform/terraform-aws-2n-c6gn/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6gn/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/hosts.tftpl b/fdio.infra.terraform/terraform-aws-2n-c6in/hosts.tftpl
new file mode 100644
index 0000000000..cb36dbb138
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/hosts.tftpl
@@ -0,0 +1,8 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/main.tf b/fdio.infra.terraform/terraform-aws-2n-c6in/main.tf
new file mode 100644
index 0000000000..5edeb46ba2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/main.tf
@@ -0,0 +1,335 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "eu-central-1b"
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "2n-c6in"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ vpc_enable_dns_hostnames = false
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_d" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.20.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 4)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ private_ip = var.tg_if1_private_ip
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.tg
+ ]
+ private_ip = var.tg_if2_private_ip
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_instance" "sut1" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut1_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut1_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
+ instance_type = var.sut1_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut1_private_ip
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.sut1
+ ]
+ private_ip = var.sut1_if1_private_ip
+ private_ips = [var.sut1_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.sut1
+ ]
+ private_ip = var.sut1_if2_private_ip
+ private_ips = [var.sut1_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut1_if1" {
+ id = aws_network_interface.sut1_if1.id
+}
+
+data "aws_network_interface" "sut1_if2" {
+ id = aws_network_interface.sut1_if2.id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut1" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut1.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/output.tf b/fdio.infra.terraform/terraform-aws-2n-c6in/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/providers.tf b/fdio.infra.terraform/terraform-aws-2n-c6in/providers.tf
new file mode 100644
index 0000000000..d0e7490d38
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://vault.service.consul:8200"
+ skip_tls_verify = true
+ token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/topology-2n-c6in.tftpl b/fdio.infra.terraform/terraform-aws-2n-c6in/topology-2n-c6in.tftpl
new file mode 100644
index 0000000000..d012d335b6
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/topology-2n-c6in.tftpl
@@ -0,0 +1,51 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-200G
+ port2:
+ # tg_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-200G
+ DUT1:
+ type: DUT
+    host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-200G
+ port2:
+ # dut1_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-200G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/variables.tf b/fdio.infra.terraform/terraform-aws-2n-c6in/variables.tf
new file mode 100644
index 0000000000..51af9587d9
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/variables.tf
@@ -0,0 +1,180 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-central-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-2n-c6in"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "RSA"
+}
+
+variable "private_key_ecdsa_curve" {
+ description = "When algorithm is ECDSA, the name of the elliptic curve to use."
+ type = string
+ default = "P521"
+}
+
+variable "private_key_rsa_bits" {
+ description = "When algorithm is RSA, the size of the generated RSA key in bits."
+ type = number
+ default = 4096
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-07430bfa17fd4e597"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut1_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0a890555652963ec2"
+}
+
+variable "sut1_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut1_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut1_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "sut1_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.11"
+}
+
+variable "sut1_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.254"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/24"
+}
+
+variable "sut1_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "sut1_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.11"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+  description = "List of commands to run on the instance at first boot (via remote-exec)."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Ansible
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-2n-c6in/versions.tf b/fdio.infra.terraform/terraform-aws-2n-c6in/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c6in/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/hosts.tftpl b/fdio.infra.terraform/terraform-aws-2n-c7gn/hosts.tftpl
new file mode 100644
index 0000000000..cb36dbb138
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/hosts.tftpl
@@ -0,0 +1,8 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/main.tf b/fdio.infra.terraform/terraform-aws-2n-c7gn/main.tf
new file mode 100644
index 0000000000..5d9899773b
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/main.tf
@@ -0,0 +1,335 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "${var.region}a"
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "2n-c7gn"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ vpc_enable_dns_hostnames = false
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_d" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.20.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 4)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ private_ip = var.tg_if1_private_ip
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.tg
+ ]
+ private_ip = var.tg_if2_private_ip
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_instance" "sut1" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut1_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut1_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
+ instance_type = var.sut1_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut1_private_ip
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.sut1
+ ]
+ private_ip = var.sut1_if1_private_ip
+ private_ips = [var.sut1_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.sut1
+ ]
+ private_ip = var.sut1_if2_private_ip
+ private_ips = [var.sut1_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut1_if1" {
+ id = aws_network_interface.sut1_if1.id
+}
+
+data "aws_network_interface" "sut1_if2" {
+ id = aws_network_interface.sut1_if2.id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut1" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut1.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/output.tf b/fdio.infra.terraform/terraform-aws-2n-c7gn/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/providers.tf b/fdio.infra.terraform/terraform-aws-2n-c7gn/providers.tf
new file mode 100644
index 0000000000..d0e7490d38
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://vault.service.consul:8200"
+ skip_tls_verify = true
+ token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/topology-2n-c7gn.tftpl b/fdio.infra.terraform/terraform-aws-2n-c7gn/topology-2n-c7gn.tftpl
new file mode 100644
index 0000000000..a0fa5fc191
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/topology-2n-c7gn.tftpl
@@ -0,0 +1,51 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # tg_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # dut1_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/variables.tf b/fdio.infra.terraform/terraform-aws-2n-c7gn/variables.tf
new file mode 100644
index 0000000000..2a80b86936
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/variables.tf
@@ -0,0 +1,168 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-west-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-2n-c7gn"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "ED25519"
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0b5aa26a6e4192705"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut1_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-08930f71bd0be1085"
+}
+
+variable "sut1_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut1_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut1_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c7gn.4xlarge"
+}
+
+variable "sut1_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.11"
+}
+
+variable "sut1_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.254"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/24"
+}
+
+variable "sut1_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "sut1_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.11"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+  description = "List of commands to run on the instance at first boot (via remote-exec)."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Ansible
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-2n-c7gn/versions.tf b/fdio.infra.terraform/terraform-aws-2n-c7gn/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-2n-c7gn/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/hosts.tftpl b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/hosts.tftpl
new file mode 100644
index 0000000000..e88c8ba510
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/hosts.tftpl
@@ -0,0 +1,9 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip}
+ ${dut2_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/main.tf b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/main.tf
index bb8efffc96..c95c8a6588 100644
--- a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/main.tf
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/main.tf
@@ -59,19 +59,24 @@ module "subnet_d" {
}
# Create Private Key
-resource "tls_private_key" "private_key" {
- algorithm = var.private_key_algorithm
- ecdsa_curve = var.private_key_ecdsa_curve
- rsa_bits = var.private_key_rsa_bits
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
}
# Create Key Pair
-resource "aws_key_pair" "key_pair" {
- depends_on = [
- tls_private_key.private_key
- ]
- key_name = local.key_pair_key_name
- public_key = tls_private_key.private_key.public_key_openssh
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
}
# Create Placement Group
@@ -91,7 +96,7 @@ resource "aws_instance" "tg" {
associate_public_ip_address = var.tg_associate_public_ip_address
instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
instance_type = var.tg_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.tg_private_ip
source_dest_check = var.tg_source_dest_check
@@ -189,7 +194,7 @@ resource "aws_instance" "sut1" {
associate_public_ip_address = var.sut1_associate_public_ip_address
instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
instance_type = var.sut1_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.sut1_private_ip
source_dest_check = var.sut1_source_dest_check
@@ -268,7 +273,7 @@ resource "aws_instance" "sut2" {
associate_public_ip_address = var.sut2_associate_public_ip_address
instance_initiated_shutdown_behavior = var.sut2_instance_initiated_shutdown_behavior
instance_type = var.sut2_instance_type
- key_name = aws_key_pair.key_pair.key_name
+ key_name = module.key_pair.key_pair_key_name
placement_group = aws_placement_group.placement_group.id
private_ip = var.sut2_private_ip
source_dest_check = var.sut2_source_dest_check
@@ -353,7 +358,7 @@ resource "null_resource" "deploy_tg" {
connection {
user = "ubuntu"
host = aws_instance.tg.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -377,7 +382,7 @@ resource "null_resource" "deploy_sut1" {
connection {
user = "ubuntu"
host = aws_instance.sut1.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -401,7 +406,7 @@ resource "null_resource" "deploy_sut2" {
connection {
user = "ubuntu"
host = aws_instance.sut2.public_ip
- private_key = tls_private_key.private_key.private_key_pem
+ private_key = module.private_key.private_key_pem
}
provisioner "remote-exec" {
@@ -409,34 +414,44 @@ resource "null_resource" "deploy_sut2" {
}
}
-resource "null_resource" "deploy_topology" {
+resource "local_file" "topology_file" {
depends_on = [
aws_instance.tg,
aws_instance.sut1,
aws_instance.sut2
]
- provisioner "ansible" {
- plays {
- playbook {
- file_path = var.ansible_topology_path
- }
- hosts = ["local"]
- extra_vars = {
- ansible_python_interpreter = local.ansible_python_executable
- testbed_name = local.testbed_name
- cloud_topology = local.topology_name
- tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
- tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
- dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
- dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
- dut2_if1_mac = data.aws_network_interface.sut2_if1.mac_address
- dut2_if2_mac = data.aws_network_interface.sut2_if2.mac_address
- tg_public_ip = aws_instance.tg.public_ip
- dut1_public_ip = aws_instance.sut1.public_ip
- dut2_public_ip = aws_instance.sut2.public_ip
- public_ip_list = "${aws_instance.tg.public_ip},${aws_instance.sut1.public_ip},${aws_instance.sut2.public_ip}"
- }
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ dut2_if1_mac = data.aws_network_interface.sut2_if1.mac_address
+ dut2_if2_mac = data.aws_network_interface.sut2_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
}
- }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/providers.tf b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/providers.tf
index d0e7490d38..2482ca2839 100644
--- a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/providers.tf
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/providers.tf
@@ -5,7 +5,7 @@ provider "aws" {
}
provider "vault" {
- address = "http://vault.service.consul:8200"
+ address = "http://10.30.51.24:8200"
skip_tls_verify = true
token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/topology-3n-aws-c5n.tftpl b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/topology-3n-aws-c5n.tftpl
new file mode 100644
index 0000000000..9886b56a8d
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/topology-3n-aws-c5n.tftpl
@@ -0,0 +1,72 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # tg_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G
+ DUT1:
+ type: DUT
+ host: ${dut1_public_ip}
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-50G
+ port2:
+ # dut1_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link21
+ model: Amazon-Nitro-50G
+ DUT2:
+ type: DUT
+ host: ${dut2_public_ip}
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut2_instance/p1 - 50GE port1 on ENA NIC.
+ mac_address: "${dut2_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link21
+ model: Amazon-Nitro-50G
+ port2:
+ # dut2_instance/p2 - 50GE port2 on ENA NIC.
+ mac_address: "${dut2_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-50G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/variables.tf b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/variables.tf
index 3f6a96d66f..d64dd5b241 100644
--- a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/variables.tf
@@ -11,7 +11,7 @@ variable "region" {
variable "resource_prefix" {
description = "Resources name prefix."
type = string
- default = "csit-2n-aws-c5n"
+ default = "csit-3n-aws-c5n"
}
variable "testbed_name" {
@@ -24,19 +24,7 @@ variable "testbed_name" {
variable "private_key_algorithm" {
description = "The name of the algorithm to use for the key."
type = string
- default = "RSA"
-}
-
-variable "private_key_ecdsa_curve" {
- description = "When algorithm is ECDSA, the name of the elliptic curve to use."
- type = string
- default = "P521"
-}
-
-variable "private_key_rsa_bits" {
- description = "When algorithm is RSA, the size of the generated RSA key in bits."
- type = number
- default = 4096
+ default = "ED25519"
}
# Variables for Placement Group
@@ -50,7 +38,7 @@ variable "placement_group_strategy" {
variable "tg_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-01d1d62914ef00b25"
+ default = "ami-07430bfa17fd4e597"
}
variable "tg_associate_public_ip_address" {
@@ -86,7 +74,7 @@ variable "tg_source_dest_check" {
variable "sut1_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-0bfdf32a014984d8a"
+ default = "ami-0a890555652963ec2"
}
variable "sut1_associate_public_ip_address" {
@@ -122,7 +110,7 @@ variable "sut1_source_dest_check" {
variable "sut2_ami" {
description = "AMI to use for the instance."
type = string
- default = "ami-05ea90e57d2df4368"
+ default = "ami-07898402cb1fd6561"
}
variable "sut2_associate_public_ip_address" {
diff --git a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/versions.tf b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/versions.tf
index 0eead1fc01..589699691e 100644
--- a/fdio.infra.terraform/terraform-aws-3n-aws-c5n/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-3n-aws-c5n/versions.tf
@@ -2,19 +2,19 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.7.0"
}
null = {
source = "hashicorp/null"
- version = "~> 3.1.0"
+ version = ">= 3.2.1"
}
tls = {
source = "hashicorp/tls"
- version = "~> 3.1.0"
+ version = ">= 4.0.4"
}
vault = {
- version = ">=2.22.1"
+ version = ">= 3.15.2"
}
}
- required_version = ">= 1.0.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/hosts.tftpl b/fdio.infra.terraform/terraform-aws-3n-c6gn/hosts.tftpl
new file mode 100644
index 0000000000..e88c8ba510
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/hosts.tftpl
@@ -0,0 +1,9 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip}
+ ${dut2_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/main.tf b/fdio.infra.terraform/terraform-aws-3n-c6gn/main.tf
new file mode 100644
index 0000000000..dc0ad6a210
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/main.tf
@@ -0,0 +1,457 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "eu-central-1a"
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "3n-c6gn"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+ sut2_name = "${var.resource_prefix}-${var.testbed_name}-sut2"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_c" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "200.0.0.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 3)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_d" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.20.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 4)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ private_ip = var.tg_if1_private_ip
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.tg
+ ]
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_instance" "sut1" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut1_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut1_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
+ instance_type = var.sut1_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut1_private_ip
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if2" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut1_if1" {
+ id = aws_network_interface.sut1_if1.id
+}
+
+data "aws_network_interface" "sut1_if2" {
+ id = aws_network_interface.sut1_if2.id
+}
+
+resource "aws_instance" "sut2" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut2_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut2_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut2_instance_initiated_shutdown_behavior
+ instance_type = var.sut2_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut2_private_ip
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if1" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut2_if1" {
+ id = aws_network_interface.sut2_if1.id
+}
+
+data "aws_network_interface" "sut2_if2" {
+ id = aws_network_interface.sut2_if2.id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut1" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut1.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut2" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut2.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ dut2_if1_mac = data.aws_network_interface.sut2_if1.mac_address
+ dut2_if2_mac = data.aws_network_interface.sut2_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/output.tf b/fdio.infra.terraform/terraform-aws-3n-c6gn/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/providers.tf b/fdio.infra.terraform/terraform-aws-3n-c6gn/providers.tf
new file mode 100644
index 0000000000..2482ca2839
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://10.30.51.24:8200"
+ skip_tls_verify = true
+ token = "s.4z5PsufFwV3sHbCzK9Y2Cojd"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/topology-3n-c6gn.tftpl b/fdio.infra.terraform/terraform-aws-3n-c6gn/topology-3n-c6gn.tftpl
new file mode 100644
index 0000000000..dc6d869a02
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/topology-3n-c6gn.tftpl
@@ -0,0 +1,73 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # tg_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # dut1_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link21
+ model: Amazon-Nitro-100G
+ DUT2:
+ type: DUT
+ host: "${dut2_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut2_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut2_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link21
+ model: Amazon-Nitro-100G
+ port2:
+ # dut2_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut2_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G
+
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/variables.tf b/fdio.infra.terraform/terraform-aws-3n-c6gn/variables.tf
new file mode 100644
index 0000000000..23ae7cf42a
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/variables.tf
@@ -0,0 +1,216 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-central-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-3n-c6gn"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "ED25519"
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-07430bfa17fd4e597"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut1_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0cebabdc14ee56909"
+}
+
+variable "sut1_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut1_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut1_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6gn.4xlarge"
+}
+
+variable "sut1_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.11"
+}
+
+variable "sut1_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut2_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0cebabdc14ee56909"
+}
+
+variable "sut2_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut2_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut2_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6gn.4xlarge"
+}
+
+variable "sut2_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.12"
+}
+
+variable "sut2_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.254"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/24"
+}
+
+variable "sut1_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "sut1_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.101"
+}
+
+variable "sut2_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.102"
+}
+
+variable "sut2_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.11"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+ description = "List of commands to run on the instance at first boot (via remote-exec)."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Null Resource
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6gn/versions.tf b/fdio.infra.terraform/terraform-aws-3n-c6gn/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6gn/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/hosts.tftpl b/fdio.infra.terraform/terraform-aws-3n-c6in/hosts.tftpl
new file mode 100644
index 0000000000..e88c8ba510
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/hosts.tftpl
@@ -0,0 +1,9 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip}
+ ${dut2_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/main.tf b/fdio.infra.terraform/terraform-aws-3n-c6in/main.tf
new file mode 100644
index 0000000000..f1bb1b0f03
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/main.tf
@@ -0,0 +1,457 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "eu-central-1b" # NOTE(review): hardcoded AZ; sibling 3n-c7gn derives "${var.region}a" — consider deriving from var.region for consistency
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "3n-c6in"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+ sut2_name = "${var.resource_prefix}-${var.testbed_name}-sut2"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_c" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "200.0.0.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 3)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_d" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.20.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 4)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ # private_ip = var.tg_if1_private_ip # NOTE(review): "private_ip" is not a valid aws_network_interface argument; "private_ips" below is authoritative
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.tg
+ ]
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_instance" "sut1" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut1_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut1_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
+ instance_type = var.sut1_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut1_private_ip
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if2" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut1_if1" {
+ id = aws_network_interface.sut1_if1.id
+}
+
+data "aws_network_interface" "sut1_if2" {
+ id = aws_network_interface.sut1_if2.id
+}
+
+resource "aws_instance" "sut2" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut2_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut2_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut2_instance_initiated_shutdown_behavior
+ instance_type = var.sut2_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut2_private_ip
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if1" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut2_if1" {
+ id = aws_network_interface.sut2_if1.id
+}
+
+data "aws_network_interface" "sut2_if2" {
+ id = aws_network_interface.sut2_if2.id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut1" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut1.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut2" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut2.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ dut2_if1_mac = data.aws_network_interface.sut2_if1.mac_address
+ dut2_if2_mac = data.aws_network_interface.sut2_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/output.tf b/fdio.infra.terraform/terraform-aws-3n-c6in/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/providers.tf b/fdio.infra.terraform/terraform-aws-3n-c6in/providers.tf
new file mode 100644
index 0000000000..2482ca2839
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://10.30.51.24:8200"
+ skip_tls_verify = true
+ token = "s.4z5PsufFwV3sHbCzK9Y2Cojd" # FIXME: hardcoded Vault token committed to VCS — revoke it and supply via VAULT_TOKEN env var instead
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/topology-3n-c6in.tftpl b/fdio.infra.terraform/terraform-aws-3n-c6in/topology-3n-c6in.tftpl
new file mode 100644
index 0000000000..2ec39d0ea9
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/topology-3n-c6in.tftpl
@@ -0,0 +1,73 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-200G
+ port2:
+ # tg_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-200G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-200G
+ port2:
+ # dut1_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link21
+ model: Amazon-Nitro-200G
+ DUT2:
+ type: DUT
+ host: "${dut2_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut2_instance/p1 - 200GE port1 on ENA NIC.
+ mac_address: "${dut2_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link21
+ model: Amazon-Nitro-200G
+ port2:
+      # dut2_instance/p2 - 200GE port2 on ENA NIC.
+ mac_address: "${dut2_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-200G
+
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/variables.tf b/fdio.infra.terraform/terraform-aws-3n-c6in/variables.tf
new file mode 100644
index 0000000000..3255cab3f3
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/variables.tf
@@ -0,0 +1,216 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-central-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-3n-c6in"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "ED25519"
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-07430bfa17fd4e597"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut1_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0a890555652963ec2"
+}
+
+variable "sut1_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut1_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut1_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "sut1_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.11"
+}
+
+variable "sut1_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut2_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-07898402cb1fd6561"
+}
+
+variable "sut2_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut2_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut2_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "sut2_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.12"
+}
+
+variable "sut2_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.254"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/24"
+}
+
+variable "sut1_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "sut1_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.101"
+}
+
+variable "sut2_if1_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.102"
+}
+
+variable "sut2_if2_private_ip" {
+ description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.11"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+ description = "List of shell commands to run on the instance during first boot provisioning."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Null Resource
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-3n-c6in/versions.tf b/fdio.infra.terraform/terraform-aws-3n-c6in/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c6in/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/hosts.tftpl b/fdio.infra.terraform/terraform-aws-3n-c7gn/hosts.tftpl
new file mode 100644
index 0000000000..e88c8ba510
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/hosts.tftpl
@@ -0,0 +1,9 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip}
+ ${dut2_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/main.tf b/fdio.infra.terraform/terraform-aws-3n-c7gn/main.tf
new file mode 100644
index 0000000000..68d2dd9be2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/main.tf
@@ -0,0 +1,457 @@
+data "vault_aws_access_credentials" "creds" {
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
+}
+
+locals {
+ ansible_python_executable = "/usr/bin/python3"
+ availability_zone = "${var.region}a"
+ name = "csit-vpc"
+ environment = "csit-vpc-environment"
+ key_pair_key_name = "${var.resource_prefix}-${var.testbed_name}-pk"
+ placement_group_name = "${var.resource_prefix}-${var.testbed_name}-pg"
+ security_group_name = "${var.resource_prefix}-${var.testbed_name}-sg"
+ testbed_name = "testbed1"
+ topology_name = "3n-c7gn"
+ tg_name = "${var.resource_prefix}-${var.testbed_name}-tg"
+ sut1_name = "${var.resource_prefix}-${var.testbed_name}-sut1"
+ sut2_name = "${var.resource_prefix}-${var.testbed_name}-sut2"
+}
+
+# Create VPC
+module "vpc" {
+ source = "../terraform-aws-vpc"
+ security_group_name = local.security_group_name
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+}
+
+# Create Subnet
+module "subnet_b" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.10.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 2)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_c" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "200.0.0.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 3)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+module "subnet_d" {
+ source = "../terraform-aws-subnet"
+ subnet_cidr_block = "192.168.20.0/24"
+ subnet_ipv6_cidr_block = cidrsubnet(module.vpc.vpc_ipv6_cidr_block, 8, 4)
+ subnet_availability_zone = local.availability_zone
+ tags_name = local.name
+ tags_environment = local.environment
+ subnet_vpc_id = module.vpc.vpc_id
+}
+
+# Create Private Key
+module "private_key" {
+ source = "pmikus/private-key/tls"
+ version = "4.0.4"
+
+ private_key_algorithm = var.private_key_algorithm
+}
+
+# Create Key Pair
+module "key_pair" {
+ source = "pmikus/key-pair/aws"
+ version = "5.7.0"
+
+ key_pair_key_name = local.key_pair_key_name
+ key_pair_public_key = module.private_key.public_key_openssh
+
+ key_pair_tags = {
+ "Environment" = local.environment
+ }
+}
+
+# Create Placement Group
+resource "aws_placement_group" "placement_group" {
+ name = local.placement_group_name
+ strategy = var.placement_group_strategy
+}
+
+# Create Instance
+resource "aws_instance" "tg" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.tg_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.tg_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.tg_instance_initiated_shutdown_behavior
+ instance_type = var.tg_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.tg_private_ip
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "1"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.tg
+ ]
+ # private_ip = var.tg_if1_private_ip # NOTE(review): "private_ip" is not a valid aws_network_interface argument; "private_ips" below is authoritative
+ private_ips = [var.tg_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "tg_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.tg
+ ]
+ private_ips = [var.tg_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.tg_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.tg_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+resource "aws_route" "route_tg_if1" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if1
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_route" "route_tg_if2" {
+ depends_on = [
+ aws_instance.tg
+ ]
+ destination_cidr_block = var.destination_cidr_block_tg_if2
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ route_table_id = module.vpc.vpc_main_route_table_id
+}
+
+resource "aws_instance" "sut1" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut1_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut1_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut1_instance_initiated_shutdown_behavior
+ instance_type = var.sut1_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut1_private_ip
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if1" {
+ depends_on = [
+ module.subnet_b,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_b.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut1_if2" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut1
+ ]
+ private_ips = [var.sut1_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut1_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut1_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut1_if1" {
+ id = aws_network_interface.sut1_if1.id
+}
+
+data "aws_network_interface" "sut1_if2" {
+ id = aws_network_interface.sut1_if2.id
+}
+
+resource "aws_instance" "sut2" {
+ depends_on = [
+ module.vpc,
+ aws_placement_group.placement_group
+ ]
+ ami = var.sut2_ami
+ availability_zone = local.availability_zone
+ associate_public_ip_address = var.sut2_associate_public_ip_address
+ instance_initiated_shutdown_behavior = var.sut2_instance_initiated_shutdown_behavior
+ instance_type = var.sut2_instance_type
+ key_name = module.key_pair.key_pair_key_name
+ placement_group = aws_placement_group.placement_group.id
+ private_ip = var.sut2_private_ip
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.vpc.vpc_subnet_id
+ vpc_security_group_ids = [module.vpc.vpc_security_group_id]
+ # host_id = "2"
+
+ root_block_device {
+ delete_on_termination = true
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if1" {
+ depends_on = [
+ module.subnet_c,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if1_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_c.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 1
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+resource "aws_network_interface" "sut2_if2" {
+ depends_on = [
+ module.subnet_d,
+ aws_instance.sut2
+ ]
+ private_ips = [var.sut2_if2_private_ip]
+ security_groups = [module.vpc.vpc_security_group_id]
+ source_dest_check = var.sut2_source_dest_check
+ subnet_id = module.subnet_d.subnet_id
+
+ attachment {
+ instance = aws_instance.sut2.id
+ device_index = 2
+ }
+
+ tags = {
+ "Name" = local.sut2_name
+ "Environment" = local.environment
+ }
+}
+
+data "aws_network_interface" "sut2_if1" {
+ id = aws_network_interface.sut2_if1.id
+}
+
+data "aws_network_interface" "sut2_if2" {
+ id = aws_network_interface.sut2_if2.id
+}
+
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut1" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut1.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "null_resource" "deploy_sut2" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2,
+ aws_instance.sut1,
+ aws_network_interface.sut1_if1,
+ aws_network_interface.sut1_if2,
+ aws_instance.sut2,
+ aws_network_interface.sut2_if1,
+ aws_network_interface.sut2_if2
+ ]
+
+ connection {
+ user = "ubuntu"
+ host = aws_instance.sut2.public_ip
+ private_key = module.private_key.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.sut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.sut1_if2.mac_address
+ dut2_if1_mac = data.aws_network_interface.sut2_if1.mac_address
+ dut2_if2_mac = data.aws_network_interface.sut2_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../topologies/available/${local.topology_name}-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ aws_instance.tg,
+ aws_instance.sut1,
+ aws_instance.sut2
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.sut1.public_ip
+ dut2_public_ip = aws_instance.sut2.public_ip
+ }
+ )
+ filename = "${path.module}/../../fdio.infra.ansible/inventories/cloud_inventory/hosts.yaml"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/output.tf b/fdio.infra.terraform/terraform-aws-3n-c7gn/output.tf
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/output.tf
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/providers.tf b/fdio.infra.terraform/terraform-aws-3n-c7gn/providers.tf
new file mode 100644
index 0000000000..2482ca2839
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/providers.tf
@@ -0,0 +1,11 @@
+provider "aws" {
+ region = var.region
+ access_key = data.vault_aws_access_credentials.creds.access_key
+ secret_key = data.vault_aws_access_credentials.creds.secret_key
+}
+
+provider "vault" {
+ address = "http://10.30.51.24:8200"
+ skip_tls_verify = true
+  # token intentionally not hardcoded; supply via the VAULT_TOKEN environment variable
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/topology-3n-c7gn.tftpl b/fdio.infra.terraform/terraform-aws-3n-c7gn/topology-3n-c7gn.tftpl
new file mode 100644
index 0000000000..65106da556
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/topology-3n-c7gn.tftpl
@@ -0,0 +1,72 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/3_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 3-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # tg_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+    arch: aarch64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link1
+ model: Amazon-Nitro-100G
+ port2:
+ # dut1_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link21
+ model: Amazon-Nitro-100G
+ DUT2:
+ type: DUT
+ host: "${dut2_public_ip}"
+    arch: aarch64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut2_instance/p1 - 100GE port1 on ENA NIC.
+ mac_address: "${dut2_if1_mac}"
+ pci_address: "0000:00:06.0"
+ link: link21
+ model: Amazon-Nitro-100G
+ port2:
+      # dut2_instance/p2 - 100GE port2 on ENA NIC.
+ mac_address: "${dut2_if2_mac}"
+ pci_address: "0000:00:07.0"
+ link: link2
+ model: Amazon-Nitro-100G \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/variables.tf b/fdio.infra.terraform/terraform-aws-3n-c7gn/variables.tf
new file mode 100644
index 0000000000..3ad51afeba
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/variables.tf
@@ -0,0 +1,216 @@
+variable "vault-name" {
+ default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
+}
+
+variable "region" {
+ description = "AWS Region."
+ type = string
+ default = "eu-west-1"
+}
+
+variable "resource_prefix" {
+ description = "Resources name prefix."
+ type = string
+ default = "csit-3n-c7gn"
+}
+
+variable "testbed_name" {
+ description = "Testbed name."
+ type = string
+ default = "testbed1"
+}
+
+# Variables for Private Key
+variable "private_key_algorithm" {
+ description = "The name of the algorithm to use for the key."
+ type = string
+ default = "ED25519"
+}
+
+# Variables for Placement Group
+variable "placement_group_strategy" {
+ description = "The placement strategy. Can be cluster, partition or spread."
+ type = string
+ default = "cluster"
+}
+
+# Variables for Instance
+variable "tg_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-0b5aa26a6e4192705"
+}
+
+variable "tg_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "tg_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "tg_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c6in.4xlarge"
+}
+
+variable "tg_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.10"
+}
+
+variable "tg_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut1_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-08930f71bd0be1085"
+}
+
+variable "sut1_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut1_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut1_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c7gn.4xlarge"
+}
+
+variable "sut1_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.11"
+}
+
+variable "sut1_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+variable "sut2_ami" {
+ description = "AMI to use for the instance."
+ type = string
+ default = "ami-08930f71bd0be1085"
+}
+
+variable "sut2_associate_public_ip_address" {
+ description = "Whether to associate a public IP address with an instance in a VPC."
+ type = bool
+ default = true
+}
+
+variable "sut2_instance_initiated_shutdown_behavior" {
+ description = "Shutdown behavior for the instance."
+ type = string
+ default = "terminate"
+}
+
+variable "sut2_instance_type" {
+ description = "The instance type to use for the instance."
+ type = string
+ default = "c7gn.4xlarge"
+}
+
+variable "sut2_private_ip" {
+ description = "Private IP address to associate with the instance in a VPC."
+ type = string
+ default = "192.168.0.12"
+}
+
+variable "sut2_source_dest_check" {
+ description = "Controls if traffic is routed to the instance when the destination address does not match the instance."
+ type = bool
+ default = false
+}
+
+# Variables for Network Interface
+variable "tg_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.254"
+}
+
+variable "tg_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.254"
+}
+
+variable "destination_cidr_block_tg_if1" {
+ description = "The destination CIDR block."
+ type = string
+ default = "10.0.0.0/24"
+}
+
+variable "destination_cidr_block_tg_if2" {
+ description = "The destination CIDR block."
+ type = string
+ default = "20.0.0.0/24"
+}
+
+variable "sut1_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.10.11"
+}
+
+variable "sut1_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.101"
+}
+
+variable "sut2_if1_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "200.0.0.102"
+}
+
+variable "sut2_if2_private_ip" {
+  description = "Private IP address to assign to the ENI."
+ type = string
+ default = "192.168.20.11"
+}
+
+# Variables for Null Resource
+variable "first_run_commands" {
+  description = "Commands executed on the instances during initial provisioning."
+ type = list(string)
+ default = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}
+
+# Variables for Ansible
+variable "ansible_topology_path" {
+ description = "Ansible topology path."
+ type = string
+ default = "../../fdio.infra.ansible/cloud_topology.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-aws-3n-c7gn/versions.tf b/fdio.infra.terraform/terraform-aws-3n-c7gn/versions.tf
new file mode 100644
index 0000000000..589699691e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-aws-3n-c7gn/versions.tf
@@ -0,0 +1,20 @@
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.7.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = ">= 3.2.1"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = ">= 4.0.4"
+ }
+ vault = {
+ version = ">= 3.15.2"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/main.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/main.tf
index 4a9f473baa..ace3aebbbc 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/main.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/main.tf
@@ -1,7 +1,5 @@
locals {
- bucket = "${var.application_name}-bucket"
- key = "${var.application_name}.zip"
- source = "app.zip"
+ key = "${var.application_name_version}-${uuid()}.zip"
tags = {
"Name" = "${var.application_name}"
"Environment" = "${var.application_name}"
@@ -9,23 +7,22 @@ locals {
}
# Create elastic beanstalk Application Version
-resource "aws_s3_bucket" "bucket" {
- bucket = local.bucket
- tags = local.tags
-}
-
+# resource "aws_s3_bucket" "bucket" {
+# bucket = var.application_bucket
+# tags = local.tags
+# }
resource "aws_s3_object" "object" {
- bucket = aws_s3_bucket.bucket.id
+ bucket = var.application_bucket
key = local.key
- source = local.source
+ source = var.application_source
tags = local.tags
}
resource "aws_elastic_beanstalk_application_version" "application_version" {
application = var.application_name
description = var.application_description
- bucket = aws_s3_bucket.bucket.id
+ bucket = var.application_bucket
key = aws_s3_object.object.id
- name = var.application_version_name
+ name = var.application_name_version
tags = local.tags
}
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/output.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/output.tf
index e69de29bb2..4262bbe6cf 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/output.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/output.tf
@@ -0,0 +1,3 @@
+output "application_version" {
+ value = aws_elastic_beanstalk_application_version.application_version.name
+}
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/variables.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/variables.tf
index 6ac2ae7bfe..8d7dd45755 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/variables.tf
@@ -1,3 +1,9 @@
+variable "application_bucket" {
+ description = "The name of the bucket."
+ type = string
+ default = "elasticbeanstalk-eu-central-1"
+}
+
variable "application_description" {
description = "Short description of the Application Version."
type = string
@@ -5,13 +11,19 @@ variable "application_description" {
}
variable "application_name" {
- description = "Name of the Beanstalk Application the version is associated."
+ description = "Name of the Beanstalk Application."
+ type = string
+ default = "beanstalk"
+}
+
+variable "application_name_version" {
+ description = "Version of the Beanstalk Application."
type = string
- default = "Beanstalk"
+ default = "beanstalk-1"
}
-variable "application_version_name" {
- description = "Unique name for the this Application Version."
+variable "application_source" {
+ description = "The source file with application code."
type = string
- default = "Beanstalk Version"
+ default = "app.zip"
}
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/versions.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/versions.tf
index 66b9c17f9b..1c82745471 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application-version/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.3.0"
+ version = ">= 5.7.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.4.2"
}
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application/versions.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application/versions.tf
index 66b9c17f9b..8ad3fd72c6 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-application/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.3.0"
+ version = ">= 5.7.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/main.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/main.tf
index fd570ab575..44373ed4de 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/main.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/main.tf
@@ -3,6 +3,118 @@ locals {
"Name" = "${var.application_name}"
"Environment" = "${var.application_name}"
}
+
+ # Settings for all loadbalancer types
+ generic_elb_settings = [
+ {
+ namespace = "aws:elasticbeanstalk:environment"
+ name = "LoadBalancerType"
+ value = var.environment_loadbalancer_type
+ }
+ ]
+
+ elb_settings = [
+ {
+ namespace = "aws:ec2:vpc"
+ name = "ELBSubnets"
+ value = join(",", [aws_subnet.subnet_a.id, aws_subnet.subnet_b.id])
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "Port"
+ value = var.environment_process_default_port
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "Protocol"
+ value = var.environment_loadbalancer_type == "network" ? "TCP" : "HTTP"
+ },
+ {
+ namespace = "aws:ec2:vpc"
+ name = "ELBScheme"
+ value = var.environment_type == "LoadBalanced" ? var.elb_scheme : ""
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "HealthCheckInterval"
+ value = var.environment_process_default_healthcheck_interval
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "HealthyThresholdCount"
+ value = var.environment_process_default_healthy_threshold_count
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "UnhealthyThresholdCount"
+ value = var.environment_process_default_unhealthy_threshold_count
+ }
+ ]
+
+ generic_alb_settings = [
+ {
+ namespace = "aws:elbv2:loadbalancer"
+ name = "SecurityGroups"
+ value = join(",", sort(var.environment_loadbalancer_security_groups))
+ }
+ ]
+
+ alb_settings = [
+ {
+ namespace = "aws:elbv2:listener:default"
+ name = "ListenerEnabled"
+ value = var.default_listener_enabled || var.environment_loadbalancer_ssl_certificate_id == "" ? "true" : "false"
+ },
+ {
+ namespace = "aws:elbv2:loadbalancer"
+ name = "ManagedSecurityGroup"
+ value = var.environment_loadbalancer_managed_security_group
+ },
+ {
+ namespace = "aws:elbv2:listener:443"
+ name = "ListenerEnabled"
+ value = var.environment_loadbalancer_ssl_certificate_id == "" ? "false" : "true"
+ },
+ {
+ namespace = "aws:elbv2:listener:443"
+ name = "Protocol"
+ value = "HTTPS"
+ },
+ {
+ namespace = "aws:elbv2:listener:443"
+ name = "SSLCertificateArns"
+ value = var.environment_loadbalancer_ssl_certificate_id
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "HealthCheckPath"
+ value = var.application_healthcheck_url
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "MatcherHTTPCode"
+ value = join(",", sort(var.default_matcher_http_code))
+ },
+ {
+ namespace = "aws:elasticbeanstalk:environment:process:default"
+ name = "HealthCheckTimeout"
+ value = var.default_health_check_timeout
+ }
+ ]
+
+ nlb_settings = [
+ {
+ namespace = "aws:elbv2:listener:default"
+ name = "ListenerEnabled"
+ value = var.default_listener_enabled
+ }
+ ]
+
+ settings_nlb = var.environment_loadbalancer_type == "network" ? concat(local.nlb_settings, local.generic_elb_settings, local.elb_settings) : []
+ settings_alb = var.environment_loadbalancer_type == "application" ? concat(local.generic_alb_settings, local.alb_settings, local.generic_elb_settings, local.elb_settings) : []
+
+  # Full set of LoadBalancer settings.
+ elb = var.environment_tier == "WebServer" ? concat(local.settings_nlb, local.settings_alb) : []
}
# Create elastic beanstalk VPC
@@ -16,19 +128,32 @@ resource "aws_vpc" "vpc" {
}
# Create elastic beanstalk Subnets
-resource "aws_subnet" "subnet" {
+resource "aws_subnet" "subnet_a" {
depends_on = [
aws_vpc.vpc
]
- availability_zone = var.subnet_availability_zone
+ availability_zone = var.subnet_a_availability_zone
assign_ipv6_address_on_creation = true
- cidr_block = aws_vpc.vpc.cidr_block
+ cidr_block = var.subnet_a_cidr_block
ipv6_cidr_block = cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 8, 1)
map_public_ip_on_launch = true
vpc_id = aws_vpc.vpc.id
tags = local.tags
}
+resource "aws_subnet" "subnet_b" {
+ depends_on = [
+ aws_vpc.vpc
+ ]
+ availability_zone = var.subnet_b_availability_zone
+ assign_ipv6_address_on_creation = true
+ cidr_block = var.subnet_b_cidr_block
+ ipv6_cidr_block = cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 8, 2)
+ map_public_ip_on_launch = true
+ vpc_id = aws_vpc.vpc.id
+ tags = local.tags
+}
+
resource "aws_internet_gateway" "internet_gateway" {
depends_on = [
aws_vpc.vpc
@@ -308,7 +433,8 @@ resource "aws_iam_role_policy" "default" {
resource "aws_elastic_beanstalk_environment" "environment" {
depends_on = [
aws_vpc.vpc,
- aws_subnet.subnet,
+ aws_subnet.subnet_a,
+ aws_subnet.subnet_b,
aws_ssm_activation.ec2
]
application = var.environment_application
@@ -337,19 +463,7 @@ resource "aws_elastic_beanstalk_environment" "environment" {
setting {
namespace = "aws:ec2:vpc"
name = "Subnets"
- value = aws_subnet.subnet.id
- }
-
- setting {
- namespace = "aws:ec2:vpc"
- name = "ELBSubnets"
- value = aws_subnet.subnet.id
- }
-
- setting {
- namespace = "aws:ec2:vpc"
- name = "ELBScheme"
- value = var.environment_type == "LoadBalanced" ? var.elb_scheme : ""
+ value = join(",", [aws_subnet.subnet_a.id, aws_subnet.subnet_b.id])
}
setting {
@@ -359,67 +473,31 @@ resource "aws_elastic_beanstalk_environment" "environment" {
}
setting {
- namespace = "aws:elasticbeanstalk:application"
- name = "Application Healthcheck URL"
- value = "/"
- }
-
- # aws:elbv2:listener:default
- setting {
- namespace = "aws:elbv2:listener:default"
- name = "ListenerEnabled"
- value = var.default_listener_enabled
- }
-
- # aws:elasticbeanstalk:environment
- setting {
- namespace = "aws:elasticbeanstalk:environment"
- name = "LoadBalancerType"
- value = var.environment_loadbalancer_type
- }
-
- setting {
namespace = "aws:elasticbeanstalk:environment"
name = "ServiceRole"
value = aws_iam_role.service.name
}
- # aws:elasticbeanstalk:environment:process:default
- setting {
- namespace = "aws:elasticbeanstalk:environment:process:default"
- name = "HealthCheckInterval"
- value = var.environment_process_default_healthcheck_interval
- }
-
- setting {
- namespace = "aws:elasticbeanstalk:environment:process:default"
- name = "HealthyThresholdCount"
- value = var.environment_process_default_healthy_threshold_count
- }
-
- setting {
- namespace = "aws:elasticbeanstalk:environment:process:default"
- name = "Port"
- value = var.environment_process_default_port
- }
-
+ # aws:autoscaling:launchconfiguration
setting {
- namespace = "aws:elasticbeanstalk:environment:process:default"
- name = "Protocol"
- value = var.environment_loadbalancer_type == "network" ? "TCP" : "HTTP"
+ namespace = "aws:autoscaling:launchconfiguration"
+ name = "IamInstanceProfile"
+ value = aws_iam_instance_profile.ec2_iam_instance_profile.name
}
setting {
- namespace = "aws:elasticbeanstalk:environment:process:default"
- name = "UnhealthyThresholdCount"
- value = var.environment_process_default_unhealthy_threshold_count
+ namespace = "aws:autoscaling:launchconfiguration"
+ name = "DisableIMDSv1"
+ value = true
}
- # aws:autoscaling:launchconfiguration
- setting {
- namespace = "aws:autoscaling:launchconfiguration"
- name = "IamInstanceProfile"
- value = aws_iam_instance_profile.ec2_iam_instance_profile.name
+ dynamic "setting" {
+ for_each = local.elb
+ content {
+ namespace = setting.value["namespace"]
+ name = setting.value["name"]
+ value = setting.value["value"]
+ }
}
# aws:autoscaling:updatepolicy:rollingupdate
@@ -441,6 +519,12 @@ resource "aws_elastic_beanstalk_environment" "environment" {
value = var.autoscaling_updatepolicy_min_instance_in_service
}
+ setting {
+ namespace = "aws:elasticbeanstalk:application"
+ name = "Application Healthcheck URL"
+ value = var.application_healthcheck_url
+ }
+
# aws:elasticbeanstalk:command
setting {
namespace = "aws:elasticbeanstalk:command"
@@ -488,6 +572,12 @@ resource "aws_elastic_beanstalk_environment" "environment" {
value = var.managedactions_platformupdate_instance_refresh_enabled
}
+ setting {
+ namespace = "aws:elasticbeanstalk:command"
+ name = "IgnoreHealthCheck"
+ value = var.command_ignore_health_check
+ }
+
# aws:autoscaling:asg
setting {
namespace = "aws:autoscaling:asg"
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/variables.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/variables.tf
index b0c41899b7..a442215a9e 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/variables.tf
@@ -2,7 +2,7 @@
variable "vpc_cidr_block" {
description = "The CIDR block for the association."
type = string
- default = "192.168.0.0/24"
+ default = "10.0.0.0/16"
}
variable "vpc_enable_dns_hostnames" {
@@ -24,12 +24,30 @@ variable "vpc_instance_tenancy" {
}
# Variables for elastic beanstalk Subnet
-variable "subnet_availability_zone" {
- description = "AWS availability zone"
+variable "subnet_a_availability_zone" {
+ description = "AZ for the subnet."
type = string
default = "us-east-1a"
}
+variable "subnet_a_cidr_block" {
+ description = "The IPv4 CIDR block for the subnet."
+ type = string
+ default = "10.0.0.0/20"
+}
+
+variable "subnet_b_availability_zone" {
+ description = "AZ for the subnet."
+ type = string
+ default = "us-east-1b"
+}
+
+variable "subnet_b_cidr_block" {
+ description = "The IPv4 CIDR block for the subnet."
+ type = string
+ default = "10.0.16.0/20"
+}
+
# Variables for elastic beanstalk Application
variable "environment_application" {
description = "The name of the application, must be unique within account."
@@ -135,7 +153,25 @@ variable "default_listener_enabled" {
variable "environment_loadbalancer_type" {
description = "Load Balancer type, e.g. 'application' or 'classic'."
type = string
- default = "network"
+ default = "classic"
+}
+
+variable "environment_loadbalancer_security_groups" {
+ description = "Load balancer security groups"
+ type = list(string)
+ default = []
+}
+
+variable "environment_loadbalancer_managed_security_group" {
+ description = "Load balancer managed security group"
+ type = string
+ default = ""
+}
+
+variable "environment_loadbalancer_ssl_certificate_id" {
+ type = string
+ default = ""
+ description = "Load Balancer SSL certificate ARN. The certificate must be present in AWS Certificate Manager"
}
# aws:elasticbeanstalk:environment:process:default
@@ -182,6 +218,30 @@ variable "autoscaling_updatepolicy_min_instance_in_service" {
default = 1
}
+variable "application_healthcheck_url" {
+ description = "The path where health check requests are sent to."
+ type = string
+ default = "/"
+}
+
+variable "environment_listener_ssl_policy" {
+ description = "Specify a security policy to apply to the listener. This option is only applicable to environments with an application load balancer."
+ type = string
+ default = ""
+}
+
+variable "default_matcher_http_code" {
+ description = "List of HTTP codes that indicate that an instance is healthy. Note that this option is only applicable to environments with a network or application load balancer."
+ type = list(string)
+ default = ["200"]
+}
+
+variable "default_health_check_timeout" {
+ description = "The amount of time, in seconds, to wait for a response during a health check. Note that this option is only applicable to environments with an application load balancer"
+ type = number
+ default = 5
+}
+
# aws:elasticbeanstalk:command
variable "command_deployment_policy" {
description = "Use the DeploymentPolicy option to set the deployment type. The following values are supported: `AllAtOnce`, `Rolling`, `RollingWithAdditionalBatch`, `Immutable`, `TrafficSplitting`."
@@ -229,6 +289,12 @@ variable "managedactions_platformupdate_instance_refresh_enabled" {
default = true
}
+variable "command_ignore_health_check" {
+ description = "Do not cancel a deployment due to failed health checks"
+ type = bool
+ default = true
+}
+
# aws:autoscaling:asg
variable "autoscaling_asg_minsize" {
description = "Minumum instances to launch"
diff --git a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/versions.tf b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/versions.tf
index d0e9db34f5..5aa6f2519e 100644
--- a/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-elastic-beanstalk-environment/versions.tf
@@ -2,11 +2,11 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.3.0"
+ version = ">= 5.7.0"
}
vault = {
version = ">= 3.2.1"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/main.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/main.tf
index 7ecca06394..7eb5ea59b9 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/main.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/main.tf
@@ -1,6 +1,9 @@
locals {
- name = "fdio-csit-dash-app"
- version = "fdio-csit-dash-app-${var.version}"
+ bucket = var.application_bucket
+ description = var.application_description
+ name = var.application_name
+ name_version = "${var.application_name}-base-1.${var.application_version}"
+ source = var.application_source
}
data "vault_aws_access_credentials" "creds" {
@@ -10,7 +13,9 @@ data "vault_aws_access_credentials" "creds" {
module "elastic_beanstalk_application_version" {
source = "../terraform-aws-elastic-beanstalk-application-version"
- application_description = "FD.io CSIT Results Dashboard"
+ application_bucket = local.bucket
+ application_description = local.description
application_name = local.name
- application_version_name = local.version
+ application_name_version = local.name_version
+ application_source = local.source
}
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/output.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/output.tf
index e69de29bb2..0126e7a9c7 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/output.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/output.tf
@@ -0,0 +1,3 @@
+output "application_version" {
+ value = module.elastic_beanstalk_application_version.application_version
+}
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/variables.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/variables.tf
index ec8a47e73f..653a92b970 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/variables.tf
@@ -1,13 +1,13 @@
variable "region" {
description = "AWS Region."
type = string
- default = "eu-central-1"
+ default = "eu-north-1"
}
variable "vault_provider_address" {
description = "Vault cluster address."
type = string
- default = "http://vault.service.consul:8200"
+ default = "http://10.30.51.24:8200"
}
variable "vault_provider_skip_tls_verify" {
@@ -19,7 +19,7 @@ variable "vault_provider_skip_tls_verify" {
variable "vault_provider_token" {
description = "Vault root token."
type = string
- sensitive = true
+  sensitive   = true
}
variable "vault_name" {
@@ -27,7 +27,31 @@ variable "vault_name" {
default = "dynamic-aws-creds-vault-fdio-csit-jenkins"
}
-variable "version" {
+variable "application_bucket" {
+ description = "The name of the bucket."
+ type = string
+ default = "elasticbeanstalk-eu-north-1-407116685360"
+}
+
+variable "application_description" {
+ description = "Short description of the Application Version."
+ type = string
+ default = "FD.io CDASH"
+}
+
+variable "application_name" {
+ description = "Name of the Beanstalk Application."
+ type = string
+ default = "fdio-csit-dash-app-m7g"
+}
+
+variable "application_source" {
+ description = "The source file with application code."
+ type = string
+ default = "../../csit.infra.dash/app.zip"
+}
+
+variable "application_version" {
description = "Application version string."
type = number
default = 1
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/versions.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/versions.tf
index 4b05b16b27..cd4761e34c 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-app-base/versions.tf
@@ -1,17 +1,12 @@
terraform {
- backend "consul" {
- address = "10.32.8.14:8500"
- scheme = "http"
- path = "terraform/app-base"
- }
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.31.0"
}
vault = {
- version = ">= 3.2.1"
+ version = ">= 3.23.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.4.2"
}
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/main.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/main.tf
index 65dc59f999..9f115cd6c6 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/main.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/main.tf
@@ -11,36 +11,42 @@ module "elastic_beanstalk_application" {
source = "../terraform-aws-elastic-beanstalk-application"
# application
- application_description = "FD.io CSIT Results Dashboard"
- application_name = "fdio-csit-dash-app"
- appversion_lifecycle_service_role_arn = ""
- appversion_lifecycle_max_count = 2
+ application_description = "FD.io CDASH M7G"
+ application_name = "fdio-csit-dash-app-m7g"
+ appversion_lifecycle_service_role_arn = "arn:aws:iam::407116685360:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk"
+ appversion_lifecycle_max_count = 10
appversion_lifecycle_delete_source_from_s3 = false
}
module "elastic_beanstalk_environment" {
source = "../terraform-aws-elastic-beanstalk-environment"
+ # environment
+ application_name = "fdio-csit-dash-app-m7g"
+
# vpc
- vpc_cidr_block = "192.168.0.0/24"
+ vpc_cidr_block = "10.0.0.0/16"
vpc_enable_dns_hostnames = true
vpc_enable_dns_support = true
vpc_instance_tenancy = "default"
# subnet
- subnet_availability_zone = "eu-central-1a"
+ subnet_a_availability_zone = "eu-north-1a"
+ subnet_a_cidr_block = "10.0.0.0/20"
+ subnet_b_availability_zone = "eu-north-1b"
+ subnet_b_cidr_block = "10.0.16.0/20"
# environment
environment_application = module.elastic_beanstalk_application.application_name
environment_description = module.elastic_beanstalk_application.application_description
- environment_name = "fdio-csit-dash-env"
- environment_solution_stack_name = "64bit Amazon Linux 2 v3.3.13 running Python 3.8"
+ environment_name = "fdio-csit-dash-env-m7g"
+ environment_solution_stack_name = "64bit Amazon Linux 2023 v4.0.6 running Python 3.11"
environment_tier = "WebServer"
environment_wait_for_ready_timeout = "25m"
environment_version_label = ""
# aws:ec2:instances
- instances_instance_types = "t3a.medium"
+ instances_instance_types = "m7g.2xlarge"
# aws:ec2:vpc
associate_public_ip_address = true
@@ -50,7 +56,8 @@ module "elastic_beanstalk_environment" {
default_listener_enabled = true
# aws:elasticbeanstalk:environment
- environment_loadbalancer_type = "network"
+ environment_loadbalancer_type = "application"
+ environment_loadbalancer_ssl_certificate_id = "arn:aws:acm:eu-north-1:407116685360:certificate/3ef3c6ae-f1d4-49f0-a8cd-5d090991bf73"
# aws:elasticbeanstalk:environment:process:default
environment_process_default_healthcheck_interval = 10
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/variables.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/variables.tf
index a107571bb6..d7ff1d19ff 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/variables.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/variables.tf
@@ -1,13 +1,13 @@
variable "region" {
description = "AWS Region."
type = string
- default = "eu-central-1"
+ default = "eu-north-1"
}
variable "vault_provider_address" {
description = "Vault cluster address."
type = string
- default = "http://vault.service.consul:8200"
+ default = "http://10.30.51.24:8200"
}
variable "vault_provider_skip_tls_verify" {
diff --git a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/versions.tf b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/versions.tf
index 4afbbc00a7..153890e01b 100644
--- a/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-fdio-csit-dash-env/versions.tf
@@ -1,17 +1,17 @@
terraform {
backend "consul" {
- address = "10.32.8.14:8500"
+ address = "10.30.51.24:8500"
scheme = "http"
- path = "terraform/dash"
+ path = "terraform/dash_m7g"
}
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.31.0"
}
vault = {
- version = ">= 3.2.1"
+ version = ">= 3.23.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.4.2"
}
diff --git a/fdio.infra.terraform/terraform-aws-subnet/example/versions.tf b/fdio.infra.terraform/terraform-aws-subnet/example/versions.tf
index af1be4a4e1..7afde83d1b 100644
--- a/fdio.infra.terraform/terraform-aws-subnet/example/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-subnet/example/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.1.0"
}
}
- required_version = ">= 1.1.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-subnet/versions.tf b/fdio.infra.terraform/terraform-aws-subnet/versions.tf
index eed543f20a..7afde83d1b 100644
--- a/fdio.infra.terraform/terraform-aws-subnet/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-subnet/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.3.0"
+ version = ">= 5.1.0"
}
}
- required_version = ">= 1.0.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-vpc/example/versions.tf b/fdio.infra.terraform/terraform-aws-vpc/example/versions.tf
index af1be4a4e1..7afde83d1b 100644
--- a/fdio.infra.terraform/terraform-aws-vpc/example/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-vpc/example/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = ">= 4.3.0"
+ version = ">= 5.1.0"
}
}
- required_version = ">= 1.1.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-aws-vpc/versions.tf b/fdio.infra.terraform/terraform-aws-vpc/versions.tf
index eed543f20a..7afde83d1b 100644
--- a/fdio.infra.terraform/terraform-aws-vpc/versions.tf
+++ b/fdio.infra.terraform/terraform-aws-vpc/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
- version = "~> 4.3.0"
+ version = ">= 5.1.0"
}
}
- required_version = ">= 1.0.4"
-}
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-loki/README.md b/fdio.infra.terraform/terraform-nomad-loki/README.md
deleted file mode 100644
index 7fec0932db..0000000000
--- a/fdio.infra.terraform/terraform-nomad-loki/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-<!-- BEGIN_TF_DOCS -->
-## Requirements
-
-| Name | Version |
-|------|---------|
-| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.1.4 |
-| <a name="requirement_nomad"></a> [nomad](#requirement\_nomad) | >= 1.4.16 |
-
-## Providers
-
-| Name | Version |
-|------|---------|
-| <a name="provider_nomad"></a> [nomad](#provider\_nomad) | 1.4.16 |
-
-## Modules
-
-No modules.
-
-## Resources
-
-| Name | Type |
-|------|------|
-| [nomad_job.nomad_job_prometheus](https://registry.terraform.io/providers/hashicorp/nomad/latest/docs/resources/job) | resource |
-
-## Inputs
-
-| Name | Description | Type | Default | Required |
-|------|-------------|------|---------|:--------:|
-| <a name="input_auto_promote"></a> [auto\_promote](#input\_auto\_promote) | Specifies if the job should auto-promote to the canary version | `bool` | `true` | no |
-| <a name="input_auto_revert"></a> [auto\_revert](#input\_auto\_revert) | Specifies if the job should auto-revert to the last stable job | `bool` | `true` | no |
-| <a name="input_canary"></a> [canary](#input\_canary) | Equal to the count of the task group allows blue/green depl. | `number` | `1` | no |
-| <a name="input_cpu"></a> [cpu](#input\_cpu) | CPU allocation | `number` | `2000` | no |
-| <a name="input_data_dir"></a> [data\_dir](#input\_data\_dir) | Loki data dir allocation | `string` | `""` | no |
-| <a name="input_datacenters"></a> [datacenters](#input\_datacenters) | Specifies the list of DCs to be considered placing this task | `list(string)` | <pre>[<br> "dc1"<br>]</pre> | no |
-| <a name="input_gl_version"></a> [gl\_version](#input\_gl\_version) | Grafana Loki version | `string` | `"2.4.2"` | no |
-| <a name="input_group_count"></a> [group\_count](#input\_group\_count) | Specifies the number of the task groups running under this one | `number` | `1` | no |
-| <a name="input_job_name"></a> [job\_name](#input\_job\_name) | Specifies a name for the job | `string` | `"loki"` | no |
-| <a name="input_max_parallel"></a> [max\_parallel](#input\_max\_parallel) | Specifies the maximum number of updates to perform in parallel | `number` | `1` | no |
-| <a name="input_memory"></a> [memory](#input\_memory) | Specifies the memory required in MB | `number` | `4096` | no |
-| <a name="input_port"></a> [port](#input\_port) | Specifies the static TCP/UDP port to allocate | `number` | `3100` | no |
-| <a name="input_region"></a> [region](#input\_region) | Specifies the list of DCs to be considered placing this task | `string` | `"global"` | no |
-| <a name="input_service_name"></a> [service\_name](#input\_service\_name) | Specifies the name this service will be advertised in Consul | `string` | `"loki"` | no |
-| <a name="input_use_canary"></a> [use\_canary](#input\_use\_canary) | Uses canary deployment | `bool` | `true` | no |
-| <a name="input_use_host_volume"></a> [use\_host\_volume](#input\_use\_host\_volume) | Use Nomad host volume feature | `bool` | `false` | no |
-| <a name="input_vault_secret"></a> [vault\_secret](#input\_vault\_secret) | Set of properties to be able to fetch secret from vault. | <pre>object({<br> use_vault_provider = bool,<br> vault_kv_policy_name = string,<br> vault_kv_path = string,<br> vault_kv_field_access_key = string,<br> vault_kv_field_secret_key = string<br> })</pre> | <pre>{<br> "use_vault_provider": false,<br> "vault_kv_field_access_key": "access_key",<br> "vault_kv_field_secret_key": "secret_key",<br> "vault_kv_path": "secret/data/prometheus",<br> "vault_kv_policy_name": "kv"<br>}</pre> | no |
-| <a name="input_volume_destination"></a> [volume\_destination](#input\_volume\_destination) | Specifies where the volume should be mounted inside the task | `string` | `""` | no |
-| <a name="input_volume_source"></a> [volume\_source](#input\_volume\_source) | The name of the volume to request | `string` | `""` | no |
-
-## Outputs
-
-No outputs.
-<!-- END_TF_DOCS --> \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl
deleted file mode 100644
index 7b38437566..0000000000
--- a/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl
+++ /dev/null
@@ -1,261 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "${region}"
-
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = ${max_parallel}
-
- health_check = "checks"
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
-%{ if use_canary }
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = ${canary}
-
- # Specifies if the job should auto-promote to the canary version when all
- # canaries become healthy during a deployment. Defaults to false which means
- # canaries must be manually updated with the nomad deployment promote
- # command.
- auto_promote = ${auto_promote}
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = ${auto_revert}
-%{ endif }
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "${job_name}-group-1" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- # The volume stanza allows the group to specify that it requires a given
- # volume from the cluster. The key of the stanza is the name of the volume
- # as it will be exposed to task configuration.
- #
- # https://www.nomadproject.io/docs/job-specification/volume
- %{ if use_host_volume }
- volume "${job_name}-volume-1" {
- type = "host"
- read_only = false
- source = "${volume_source}"
- }
- %{ endif }
-
- # The restart stanza configures a tasks's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
- }
-
- # The constraint allows restricting the set of eligible nodes. Constraints
- # may filter on attributes or client metadata.
- #
- # https://www.nomadproject.io/docs/job-specification/constraint
- #
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
-
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
-
- # The network stanza specifies the networking requirements for the task
- # group, including the network mode and port allocations. When scheduling
- # jobs in Nomad they are provisioned across your fleet of machines along
- # with other jobs and services. Because you don't know in advance what host
- # your job will be provisioned on, Nomad will provide your tasks with
- # network configuration when they start up.
- #
- # https://www.nomadproject.io/docs/job-specification/network
- #
- network {
- port "${service_name}" {
- static = ${port}
- to = ${port}
- }
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # https://www.nomadproject.io/docs/job-specification/task
- #
- task "${job_name}-task-1" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "exec"
-
- %{ if use_host_volume }
- volume_mount {
- volume = "${job_name}-volume-1"
- destination = "${volume_destination}"
- read_only = false
- }
- %{ endif }
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- command = "local/loki-linux-amd64"
- }
-
- # The artifact stanza instructs Nomad to fetch and unpack a remote resource,
- # such as a file, tarball, or binary. Nomad downloads artifacts using the
- # popular go-getter library, which permits downloading artifacts from a
- # variety of locations using a URL as the input source.
- #
- # https://www.nomadproject.io/docs/job-specification/artifact
- #
- artifact {
- source = "${url}"
- args = [
- "-config.file secrets/config.yml"
- ]
- }
-
- template {
- change_mode = "noop"
- change_signal = "SIGINT"
- destination = "secrets/loki.yml"
- data = <<EOH
----
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- http_listen_address: 127.0.0.1
-
-schema_config:
- configs:
- - from: 2020-05-15
- store: boltdb
- object_store: filesystem
- schema: v11
- index:
- prefix: index_
- period: 168h
-
-storage_config:
- boltdb:
- directory: /tmp/loki/index
-
- filesystem:
- directory: /tmp/loki/chunks
-
- aws:
- bucketnames: loki
- endpoint: http://storage.service.consul:9000
- access_key_id: storage
- secret_access_key: Storage1234
- insecure: false
- sse_encryption: false
- http_config:
- idle_conn_timeout: 90s
- response_header_timeout: 0s
- insecure_skip_verify: false
- s3forcepathstyle: true
-EOH
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "${service_name}"
- port = "${service_name}"
- tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "Loki Check Live"
- type = "http"
- path = "/-/healthy"
- interval = "10s"
- timeout = "2s"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
diff --git a/fdio.infra.terraform/terraform-nomad-loki/main.tf b/fdio.infra.terraform/terraform-nomad-loki/main.tf
deleted file mode 100644
index a2fc70d254..0000000000
--- a/fdio.infra.terraform/terraform-nomad-loki/main.tf
+++ /dev/null
@@ -1,40 +0,0 @@
-locals {
- datacenters = join(",", var.datacenters)
- url = join("",
- [
- "https://github.com",
- "/grafana/loki/releases/download/v${var.gl_version}/loki-linux-amd64.zip"
- ]
- )
-}
-
-resource "nomad_job" "nomad_job_prometheus" {
- jobspec = templatefile(
- "${path.module}/conf/nomad/loki.hcl.tftpl",
- {
- auto_promote = var.auto_promote,
- auto_revert = var.auto_revert,
- canary = var.canary,
- cpu = var.cpu,
- datacenters = local.datacenters,
- group_count = var.group_count,
- job_name = var.job_name,
- max_parallel = var.max_parallel,
- memory = var.memory
- port = var.port,
- region = var.region,
- service_name = var.service_name,
- url = local.url,
- use_canary = var.use_canary,
- use_host_volume = var.use_host_volume,
- use_vault_provider = var.vault_secret.use_vault_provider,
- vault_kv_policy_name = var.vault_secret.vault_kv_policy_name,
- vault_kv_path = var.vault_secret.vault_kv_path,
- vault_kv_field_access_key = var.vault_secret.vault_kv_field_access_key,
- vault_kv_field_secret_key = var.vault_secret.vault_kv_field_secret_key,
- version = var.gl_version,
- volume_destination = var.volume_destination,
- volume_source = var.volume_source
- })
- detach = false
-}
diff --git a/fdio.infra.terraform/terraform-nomad-loki/variables.tf b/fdio.infra.terraform/terraform-nomad-loki/variables.tf
deleted file mode 100644
index 049290f5a8..0000000000
--- a/fdio.infra.terraform/terraform-nomad-loki/variables.tf
+++ /dev/null
@@ -1,127 +0,0 @@
-# Nomad
-variable "datacenters" {
- description = "Specifies the list of DCs to be considered placing this task"
- type = list(string)
- default = ["dc1"]
-}
-
-variable "region" {
- description = "Specifies the list of DCs to be considered placing this task"
- type = string
- default = "global"
-}
-
-variable "volume_source" {
- description = "The name of the volume to request"
- type = string
- default = ""
-}
-
-# Grafana Loki
-variable "gl_version" {
- description = "Grafana Loki version"
- type = string
- default = "2.4.2"
-}
-
-variable "auto_promote" {
- description = "Specifies if the job should auto-promote to the canary version"
- type = bool
- default = true
-}
-
-variable "auto_revert" {
- description = "Specifies if the job should auto-revert to the last stable job"
- type = bool
- default = true
-}
-
-variable "canary" {
- description = "Equal to the count of the task group allows blue/green depl."
- type = number
- default = 1
-}
-
-variable "cpu" {
- description = "CPU allocation"
- type = number
- default = 2000
-}
-
-variable "data_dir" {
- description = "Loki data dir allocation"
- type = string
- default = ""
-}
-
-variable "group_count" {
- description = "Specifies the number of the task groups running under this one"
- type = number
- default = 1
-}
-
-variable "job_name" {
- description = "Specifies a name for the job"
- type = string
- default = "loki"
-}
-
-variable "max_parallel" {
- description = "Specifies the maximum number of updates to perform in parallel"
- type = number
- default = 1
-}
-
-variable "memory" {
- description = "Specifies the memory required in MB"
- type = number
- default = 4096
-}
-
-variable "port" {
- description = "Specifies the static TCP/UDP port to allocate"
- type = number
- default = 3100
-}
-
-variable "service_name" {
- description = "Specifies the name this service will be advertised in Consul"
- type = string
- default = "loki"
-}
-
-variable "use_canary" {
- description = "Uses canary deployment"
- type = bool
- default = true
-}
-
-variable "use_host_volume" {
- description = "Use Nomad host volume feature"
- type = bool
- default = false
-}
-
-variable "volume_destination" {
- description = "Specifies where the volume should be mounted inside the task"
- type = string
- default = ""
-}
-
-variable "vault_secret" {
- type = object({
- use_vault_provider = bool,
- vault_kv_policy_name = string,
- vault_kv_path = string,
- vault_kv_field_access_key = string,
- vault_kv_field_secret_key = string
- })
- description = "Set of properties to be able to fetch secret from vault."
- default = {
- use_vault_provider = false
- vault_kv_policy_name = "kv"
- vault_kv_path = "secret/data/prometheus"
- vault_kv_field_access_key = "access_key"
- vault_kv_field_secret_key = "secret_key"
- }
-}
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/conf/nomad/prometheus.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-prometheus/conf/nomad/prometheus.hcl.tftpl
index e3c508dd32..4eb4428988 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/conf/nomad/prometheus.hcl.tftpl
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/conf/nomad/prometheus.hcl.tftpl
@@ -8,18 +8,15 @@ job "${job_name}" {
datacenters = "${datacenters}"
# The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
+ # decision on placement.
#
- # https://www.nomadproject.io/docs/jobspec/schedulers
+ # https://www.nomadproject.io/docs/jobspec/schedulers
#
type = "service"
update {
# The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
+ # perform in parallel.
max_parallel = ${max_parallel}
health_check = "checks"
@@ -73,12 +70,11 @@ job "${job_name}" {
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
- # https://www.nomadproject.io/docs/job-specification/group
+ # https://www.nomadproject.io/docs/job-specification/group
#
group "${job_name}-group-1" {
# The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
+ # be running under this group. This value must be non-negative.
count = ${group_count}
# The volume stanza allows the group to specify that it requires a given
@@ -86,6 +82,7 @@ job "${job_name}" {
# as it will be exposed to task configuration.
#
# https://www.nomadproject.io/docs/job-specification/volume
+ #
%{ if use_host_volume }
volume "${job_name}-volume-1" {
type = "host"
@@ -100,23 +97,22 @@ job "${job_name}" {
# https://www.nomadproject.io/docs/job-specification/restart
#
restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
+ interval = "30m"
+ attempts = 40
+ delay = "15s"
+ mode = "delay"
}
# The constraint allows restricting the set of eligible nodes. Constraints
# may filter on attributes or client metadata.
#
- # https://www.nomadproject.io/docs/job-specification/constraint
+ # https://www.nomadproject.io/docs/job-specification/constraint
#
constraint {
attribute = "$${attr.cpu.arch}"
operator = "!="
value = "arm64"
}
-
constraint {
attribute = "$${node.class}"
value = "builder"
@@ -129,7 +125,7 @@ job "${job_name}" {
# your job will be provisioned on, Nomad will provide your tasks with
# network configuration when they start up.
#
- # https://www.nomadproject.io/docs/job-specification/network
+ # https://www.nomadproject.io/docs/job-specification/network
#
network {
port "${service_name}" {
@@ -141,49 +137,164 @@ job "${job_name}" {
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
- # https://www.nomadproject.io/docs/job-specification/task
+ # https://www.nomadproject.io/docs/job-specification/task
#
task "${job_name}-task-1" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
driver = "exec"
- %{ if use_host_volume }
+ %{ if use_host_volume }
volume_mount {
volume = "${job_name}-volume-1"
destination = "${volume_destination}"
read_only = false
}
- %{ endif }
+ %{ endif }
- %{ if use_vault_provider }
+ %{ if use_vault_provider }
vault {
policies = "${vault_kv_policy_name}"
}
- %{ endif }
+ %{ endif }
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
config {
- command = "local/prometheus-${version}.linux-amd64/prometheus"
- args = [
+ command = "local/prometheus-${version}.linux-amd64/prometheus"
+ args = [
"--config.file=secrets/prometheus.yml",
+ "--web.config.file=secrets/web-config.yml",
"--storage.tsdb.path=${volume_destination}prometheus/",
"--storage.tsdb.retention.time=7d"
]
}
- # The artifact stanza instructs Nomad to fetch and unpack a remote resource,
- # such as a file, tarball, or binary. Nomad downloads artifacts using the
- # popular go-getter library, which permits downloading artifacts from a
- # variety of locations using a URL as the input source.
+ # The artifact stanza instructs Nomad to fetch and unpack a remote
+ # resource, such as a file, tarball, or binary. Nomad downloads artifacts
+ # using the popular go-getter library, which permits downloading artifacts
+ # from a variety of locations using a URL as the input source.
#
- # https://www.nomadproject.io/docs/job-specification/artifact
+ # https://www.nomadproject.io/docs/job-specification/artifact
#
artifact {
- source = "${url}"
+ source = "${artifact_source}"
+ options {
+ checksum = "sha256:${artifact_source_checksum}"
+ }
+ }
+
+ # The "template" stanza instructs Nomad to manage a template, such as
+ # a configuration file or script. This template can optionally pull data
+ # from Consul or Vault to populate runtime configuration data.
+ #
+ # https://www.nomadproject.io/docs/job-specification/template
+ #
+ template {
+ change_mode = "noop"
+ change_signal = "SIGINT"
+ destination = "secrets/cert_file.crt"
+ left_delimiter = "{{{"
+ right_delimiter = "}}}"
+ data = <<EOH
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIUDtmFbbnYaXbXH5ddtHi9l25wM7owDQYJKoZIhvcNAQEL
+BQAwaTELMAkGA1UEBhMCU0sxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEiMCAGA1UEAwwZcHJvbWV0aGV1cy5z
+ZXJ2aWNlLmNvbnN1bDAeFw0yMjEyMzEyMDMxMDFaFw0yMzAxMzAyMDMxMDFaMGkx
+CzAJBgNVBAYTAlNLMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
+cm5ldCBXaWRnaXRzIFB0eSBMdGQxIjAgBgNVBAMMGXByb21ldGhldXMuc2Vydmlj
+ZS5jb25zdWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCGH4Tyj+9G
+wYJNb3ubIdr5r0/DZL6XEnRIMiz88TN2QmdwAGKyQqQd7ka0IkdDHPhpRuK8IV1g
+ELQhKab7YJCa6zWuy+rQ6JFlotGC+2tIXd3MDriUd1VPVoX6fw/5zUK/2j6exBk4
+iqxPXHchQLzZ0viUXhQIBS1IUMTbfc0vjA8U0uPgpmAR7ieePWFwmUDxjOLMvJw6
++goeOfaHhW4yYgT+kg7L3rT62G+KG6Op/p7k7BNZ6G6Y6K6uJ7Z/AayAClF2sPZz
+UIGr0uEDvD4IcAsfQgpR5vK/SVBFU5+DSO68mm11m+8IH/HA6GvNSEvCRC0Wtrsm
+Dyq+9S3wZ7tNi7msjQWWKTB1GvTbCbPE1G/q5GJdoKUnioys6AMP4DTEV9o3lCSg
+0sjYnkSTKgRplnuY/7Y2qSNnD1Rw0ZneSkF+8ocgiYcTvtyOY2fkhlT2VaQLX987
+m7892ikPvoCnc/LVeREWW7hCuIQ1E1CCqg304Kd9gCgKoOGXoYmC/3wgJW0RkaM0
+x5DpNLYx0y11CPVg315dvprOuedap6J3CNhBE3fO8ymwepFTzTcWLWgSVWrRLZnx
+Lgb4SPhjxPg6cCZptkmXrPA+9SgW8iNHd/Fer6MAs82Kcp2T1C+qq9RurL/jjxTD
+JaFrwZC2lgWELToMyVDrkBJJbA/2cU9CMQIDAQABo1MwUTAdBgNVHQ4EFgQUx1Mi
+fylZExNnIz0EkrPRdXYmHmAwHwYDVR0jBBgwFoAUx1MifylZExNnIz0EkrPRdXYm
+HmAwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAbvlpMg4YRTNe
+0cgqMZky/GpNjvE/zFManUGgYns8TKyZ8U0laBxRQ4XU/fASwAcOBJYtrkG7w8z+
+FaOUptaOlNGW1VWsPDJt8ZQ2gAcTwKSW2EsBWCmOUJVNH5F0f6fTSqIUIXyxhP2w
+JVniSkfarhb/Y1EDCACdr7Xpu6iF+nQo2o4/HE4Wkto4qwvlrdApYv4dl5J1TWjq
+72fO9axDlNnEGVxa3C3xvKOQqWrEUy/HqC9p4it1yCiq6IYVLyve0meVFBY9xNXU
+137AN7ks4ouuR1FZQkhLtqFuIekSZ5l4G4alwdv1NB8vohJMuMJyk9DarTLqXcYU
+1uypZSmgREn8ByYrj4ochkSpiPw7wgK4H1Aa2cy4KUuzmLLShYu6Mov7hyJDoJSe
+JsDVNoEBuhql4jENATqbWT3pIgYwBvBEXuYXqekcNmVZkKiSOlsxKFfSz21HYDgA
+lCu4SMtlRYHcm4TuoTuy/FEPxHSjFY3pMciJrnO/qUrv9LlWPe1wjKhZLRPEebTk
+r+Oh+aVWpy3ps7shPTjczOrmQykWWBGAjndZjZi4VvZNRxkGZuNwzzZcEkzt0Db7
+l83pTRD58mvLHWl2QXoBS3t7IM6sOMwQvPx1Inp7hb7UIpNsJQaUrhhfKqy0sK18
+mXs4VRtrxYycXxsLbk0SaZGh+juT53M=
+-----END CERTIFICATE-----
+EOH
+ }
+
+ template {
+ change_mode = "noop"
+ change_signal = "SIGINT"
+ destination = "secrets/key_file.key"
+ left_delimiter = "{{{"
+ right_delimiter = "}}}"
+ data = <<EOH
+-----BEGIN PRIVATE KEY-----
+MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCGH4Tyj+9GwYJN
+b3ubIdr5r0/DZL6XEnRIMiz88TN2QmdwAGKyQqQd7ka0IkdDHPhpRuK8IV1gELQh
+Kab7YJCa6zWuy+rQ6JFlotGC+2tIXd3MDriUd1VPVoX6fw/5zUK/2j6exBk4iqxP
+XHchQLzZ0viUXhQIBS1IUMTbfc0vjA8U0uPgpmAR7ieePWFwmUDxjOLMvJw6+goe
+OfaHhW4yYgT+kg7L3rT62G+KG6Op/p7k7BNZ6G6Y6K6uJ7Z/AayAClF2sPZzUIGr
+0uEDvD4IcAsfQgpR5vK/SVBFU5+DSO68mm11m+8IH/HA6GvNSEvCRC0WtrsmDyq+
+9S3wZ7tNi7msjQWWKTB1GvTbCbPE1G/q5GJdoKUnioys6AMP4DTEV9o3lCSg0sjY
+nkSTKgRplnuY/7Y2qSNnD1Rw0ZneSkF+8ocgiYcTvtyOY2fkhlT2VaQLX987m789
+2ikPvoCnc/LVeREWW7hCuIQ1E1CCqg304Kd9gCgKoOGXoYmC/3wgJW0RkaM0x5Dp
+NLYx0y11CPVg315dvprOuedap6J3CNhBE3fO8ymwepFTzTcWLWgSVWrRLZnxLgb4
+SPhjxPg6cCZptkmXrPA+9SgW8iNHd/Fer6MAs82Kcp2T1C+qq9RurL/jjxTDJaFr
+wZC2lgWELToMyVDrkBJJbA/2cU9CMQIDAQABAoICAA5AQByT3Z07h3BZ5ZzUqpM4
+JPYCeNvNeqyHJE+WA11P7fSxHcuKGC0T+dA/Cipf5CcvgHzz4JuJ+tHBPrxcBNFp
+J5GUmjUrWPOfKrrLoxkT3DLH56Xizh45d8/ne1eUD0EaW+f7tyBSX7+o+AGBAu/0
+IjSFkIRPpIGYD2qxAcHJFHsmc08V7oRJNU1zgSx5JDTmPtz5N3Juye9vQjohG9Xf
+o183Pro7xigXIjbe+/NemhyB1waJE2NM6e6YSqRRFbafIgvF/tG+3qBWrlD6ye6U
+lSHznuwX6XgYvp43Je5JrBA/Kl1CPdIzrrjMGVQ9F8ui+dV9ggInv2d93q06IGUU
+D1o9XsZivYkn1EkLEhFXD5CYj6oR1M+MyvUrBD0bJePQCBUo+WJ2sEDt9PN2AtFL
+9j7NKK/xXX5cTdAajeIvSS1PUGAHi7r1OF/c7bn3UFNOuOBEYzLsSZGP34AVglor
+NON0ENCTuylmDSFd8vpaKFQpV5SK3M2k8dPRe7VEu2C9UlRvAq0xnabSHNxbwNLU
+KuGDMSCKDc2npf3oCeQKU2PngAcePnwWSiapAkf5OqltQ/vMbrEpROpfzXLlRxLZ
+76MDMFMQkT7m0hik6aPBHTitcWRalxHhK0ze8GvO0wesIBdyYShPKg+VDNg3qFMm
+epVXzoi8xNzW8S6yi9DJAoIBAQC2l90VF5evDsv8nwsWMIa/rFGGLitpw23+oNcZ
+xsIDMsGie06GYwzYHNRsd3sqK5TNLtl2vJGaVNbeDcC5T22NAYPRjNas7I5svIki
+SnT4K68ICIVVxtfETbh2qoXSu+O3pyWJmHqqcQrvW2DlUvs0nxk/v3GukFjTVbuU
+qmXp1KjPAVMNYoWNCJkHLEpq6e3K3q4YhEImGhMbN8suvVR9+fkKx8QvKHcqT2kn
+9AlK7t57IPqovbni9KMfMZ+wPqw6HsYTL8lQE5NaqMB5q9Pl3SnzcRR0FSadNAiD
+/W9jWyMazE0UsNDn241X81tVlU78Kx9S/IN97m/FSeDA1XudAoIBAQC8CzVeHxTw
+U+ts/fi1XEuWOph2cIm6qd4aiyGX/riux0O6GUFuIQkosP5StWJyNPLBohWHC6eq
+hPk7b0vPWmxuhttUPLA/+6+CICC0jEMWvnDAd5aJULfT0pTLZyizVu2f/GbVaiL6
+pgsqeGyKnuh9cNTW5w7Mc45fXkgyKrB4W5aPfjoHN51n+jUqaDrfrp3CoWFviNDn
+n3WNFtgrkj/jzQM8XFixhwxADfjd8+sZVmHT4GYjIDS4pCqs5gtIZYKhXDb0Dydj
+fH/HiEXC63z0SuFjGNbomC/Era7kI3+1aK2qs6dyASzZKDN6dHKYoalHReUe/Cxk
+prRcyYRWhA6lAoIBAEVrLy5Zrd1sLrl4beqdwF0W0lfFLdQj7Kml1KGEIza8EUoI
+vy3wcm2naEtkkXrS3tuzOBIgVurp3lbFu8O4Ito8/TSp6uQLe4pzk19qF1ZSpVTU
+iHy4AEgtlDfpVL9tl4G3FlpdkiVCnPmrMAd/qOm0oxDNZBcN4fdW3N4EeoKPyy4I
+Pt8T2dpormU/vXswPKuoRWAkyFFcEG+Eosa+TGUoqDolAL09ETEQx9XcvbuzXPpK
+64FDwGw8vdeaMi/7Y9ck5AFfZZYAG0GYbrTTUthNYSmgkDoh4HBb2/DyZWrMt2f0
+zElVf9bmbbJGXy8GeOT+MAaI4iT6hZvoHn6xqzECggEABoQg6k0LbbSKwPEgEDDN
+kbwgEmKd8zD1uFe/50N1ZOEU0LsVUFqmtZlEhtswOSLqkpkqQ868laUb+dpGdz37
+6eyUZxvfQ6hWEZ1JZNhDbuNUhubd+Y4pgJaYf1/owiYt/9BAQ/70jVj5pBQeNsOA
+7O/fAD9rfNw4P8fFmq9uBA2wbvKB0kQ0GSlLdFe+SogDgX4UIUhNbOlSqnvzK7da
+rWsqRIoyrJwwaXvSduZ/7BXZN/1brLXt/cP6kpk6JN0XpL3MTbLEu6bRyrlHKZT9
+dH2vx75RnCfB5//YwqEUSNYCxpqJH+M4iaHh/slQO0fG1OhwIx278BTyxRBanKDg
+3QKCAQBoVnM3PDqaSAT1g3f3neYiXyZektJganRLj5wmDXYAySM2ag/oDacswmP/
+J0BQ9KYK+dSgXldlaXtC05oxdhxY5cawbCFNfbjGDZ6zGwgLDocyFtqOBZf6UXCV
+Gtj/9r6iyD2/2wbo/lrS0d3yNcNN0nkZUxoyl+J6uGB1o8bo+cfL+mi4pkALKV8L
+Oa/fPazAQtikZBHSWtdQamyUMFSAdMUeYIhaXBfkNUZG4sz9nKD5UGBOmquLMBt6
+zBPM+4dv4x/MEAEnSC2ANW8vDGFBgG/5H5+j2F0RM6O1MlkDzrOAIvUTrMJlJDBt
+775JbZNCKpaELqxy4BNPfRDEJGBh
+-----END PRIVATE KEY-----
+EOH
}
# The "template" stanza instructs Nomad to manage a template, such as
@@ -335,24 +446,6 @@ groups:
annotations:
summary: "Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})."
description: '{{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.'
-- name: "Min.io"
- rules:
- - alert: MinioDiskOffline
- expr: minio_offline_disks > 0
- for: 0m
- labels:
- severity: critical
- annotations:
- summary: "Minio disk offline (instance {{ $labels.instance }})"
- description: "Minio disk is offline."
- - alert: MinioStorageSpaceExhausted
- expr: minio_disk_storage_free_bytes / 1024 / 1024 / 1024 < 10
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: "Minio storage space exhausted (instance {{ $labels.instance }})."
- description: "Minio storage space is low (< 10 GB)."
- name: "Prometheus"
rules:
- alert: PrometheusConfigurationReloadFailure
@@ -451,7 +544,6 @@ rule_files:
- 'alerts.yml'
scrape_configs:
-
- job_name: 'Nomad Cluster'
consul_sd_configs:
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
@@ -466,17 +558,12 @@ scrape_configs:
- job_name: 'Consul Cluster'
static_configs:
- - targets: [ '10.30.51.16:8500' ]
- - targets: [ '10.30.51.17:8500' ]
- - targets: [ '10.30.51.18:8500' ]
- - targets: [ '10.30.51.19:8500' ]
- - targets: [ '10.30.51.20:8500' ]
- - targets: [ '10.30.51.21:8500' ]
- - targets: [ '10.30.51.22:8500' ]
- targets: [ '10.30.51.23:8500' ]
- targets: [ '10.30.51.24:8500' ]
- targets: [ '10.30.51.25:8500' ]
- targets: [ '10.30.51.26:8500' ]
+ - targets: [ '10.30.51.27:8500' ]
+ - targets: [ '10.30.51.28:8500' ]
- targets: [ '10.30.51.50:8500' ]
- targets: [ '10.30.51.51:8500' ]
- targets: [ '10.30.51.70:8500' ]
@@ -503,17 +590,12 @@ scrape_configs:
- job_name: 'Node Exporter'
static_configs:
- - targets: [ '10.30.51.16:9100' ]
- - targets: [ '10.30.51.17:9100' ]
- - targets: [ '10.30.51.18:9100' ]
- - targets: [ '10.30.51.19:9100' ]
- - targets: [ '10.30.51.20:9100' ]
- - targets: [ '10.30.51.21:9100' ]
- - targets: [ '10.30.51.22:9100' ]
- targets: [ '10.30.51.23:9100' ]
- targets: [ '10.30.51.24:9100' ]
- targets: [ '10.30.51.25:9100' ]
- targets: [ '10.30.51.26:9100' ]
+ - targets: [ '10.30.51.27:9100' ]
+ - targets: [ '10.30.51.28:9100' ]
- targets: [ '10.30.51.50:9100' ]
- targets: [ '10.30.51.51:9100' ]
- targets: [ '10.30.51.70:9100' ]
@@ -526,39 +608,55 @@ scrape_configs:
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'alertmanager' ]
- - job_name: 'Grafana'
- consul_sd_configs:
- - server: '{{ env "NOMAD_IP_prometheus" }}:8500'
- services: [ 'grafana' ]
-
- job_name: 'Prometheus'
+ honor_timestamps: true
+ params:
+ format:
+ - prometheus
+ scheme: https
+ follow_redirects: true
+ enable_http2: true
consul_sd_configs:
- - server: '{{ env "NOMAD_IP_prometheus" }}:8500'
- services: [ 'prometheus' ]
+ - server: {{ env "CONSUL_HTTP_ADDR" }}
+ services:
+ - prometheus
+ tls_config:
+ cert_file: cert_file.crt
+ key_file: key_file.key
+ insecure_skip_verify: true
+EOH
+ }
- - job_name: 'Minio'
- bearer_token: eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ3NjQ1ODEzMzcsImlzcyI6InByb21ldGhldXMiLCJzdWIiOiJtaW5pbyJ9.oeTw3EIaiFmlDikrHXWiWXMH2vxLfDLkfjEC7G2N3M_keH_xyA_l2ofLLNYtopa_3GCEZnxLQdPuFZrmgpkDWg
- consul_sd_configs:
- - server: '{{ env "NOMAD_IP_prometheus" }}:8500'
- services: [ 'storage' ]
- metrics_path: /minio/prometheus/metrics
+ template {
+ change_mode = "noop"
+ change_signal = "SIGINT"
+ destination = "secrets/web-config.yml"
+ left_delimiter = "{{{"
+ right_delimiter = "}}}"
+ data = <<EOH
+---
+tls_server_config:
+ cert_file: cert_file.crt
+ key_file: key_file.key
EOH
}
# The service stanza instructs Nomad to register a service with Consul.
#
- # https://www.nomadproject.io/docs/job-specification/service
+ # https://www.nomadproject.io/docs/job-specification/service
#
service {
name = "${service_name}"
port = "${service_name}"
tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
check {
- name = "Prometheus Check Live"
- type = "http"
- path = "/-/healthy"
- interval = "10s"
- timeout = "2s"
+ name = "Prometheus Check Live"
+ type = "http"
+ path = "/-/healthy"
+ protocol = "https"
+ tls_skip_verify = true
+ interval = "10s"
+ timeout = "2s"
}
}
@@ -567,7 +665,7 @@ EOH
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
- # https://www.nomadproject.io/docs/job-specification/resources
+ # https://www.nomadproject.io/docs/job-specification/resources
#
resources {
cpu = ${cpu}
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/fdio/main.tf b/fdio.infra.terraform/terraform-nomad-prometheus/fdio/main.tf
index e0ca417a78..054360c838 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/fdio/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/fdio/main.tf
@@ -6,5 +6,5 @@ module "prometheus" {
# prometheus
datacenters = ["yul1"]
- pm_version = "2.33.1"
+ pm_version = "2.42.0"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/fdio/versions.tf b/fdio.infra.terraform/terraform-nomad-prometheus/fdio/versions.tf
index 409f47958f..97a7173a64 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/fdio/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/fdio/versions.tf
@@ -1,17 +1,17 @@
terraform {
backend "consul" {
- address = "consul.service.consul:8500"
+ address = "10.30.51.23:8500"
scheme = "http"
path = "terraform/prometheus"
}
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = ">= 1.4.16"
+ version = ">= 1.4.19"
}
vault = {
- version = ">= 3.2.1"
+ version = ">= 3.12.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.3.7"
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/main.tf b/fdio.infra.terraform/terraform-nomad-prometheus/main.tf
index 10b71b8c82..0c609066e4 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/main.tf
@@ -1,22 +1,25 @@
locals {
- datacenters = join(",", var.datacenters)
- url = join("",
+ artifact_source = join("",
[
"https://github.com",
"/prometheus/prometheus/releases/download/",
"v${var.pm_version}/prometheus-${var.pm_version}.linux-amd64.tar.gz"
]
)
+ datacenters = join(",", var.datacenters)
}
resource "nomad_job" "nomad_job_prometheus" {
jobspec = templatefile(
"${path.module}/conf/nomad/prometheus.hcl.tftpl",
{
+ artifact_source = local.artifact_source,
+ artifact_source_checksum = var.artifact_source_checksum,
auto_promote = var.auto_promote,
auto_revert = var.auto_revert,
canary = var.canary,
cpu = var.cpu,
+ constraint_value = var.constraint_value,
datacenters = local.datacenters,
group_count = var.group_count,
job_name = var.job_name,
@@ -25,7 +28,6 @@ resource "nomad_job" "nomad_job_prometheus" {
port = var.port,
region = var.region,
service_name = var.service_name,
- url = local.url,
use_canary = var.use_canary,
use_host_volume = var.use_host_volume,
use_vault_provider = var.vault_secret.use_vault_provider,
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/variables.tf b/fdio.infra.terraform/terraform-nomad-prometheus/variables.tf
index eab4d3b466..3c8c4b7a26 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/variables.tf
@@ -21,7 +21,7 @@ variable "volume_source" {
variable "pm_version" {
description = "Prometheus version"
type = string
- default = "2.33.1"
+ default = "2.42.0"
}
variable "auto_promote" {
@@ -48,6 +48,12 @@ variable "cpu" {
default = 2000
}
+variable "constraint_value" {
+ description = "The constraint allows restricting the set of eligible nodes."
+ type = string
+ default = "builder"
+}
+
variable "data_dir" {
description = "Prometheus DISK allocation"
type = string
@@ -102,6 +108,12 @@ variable "use_host_volume" {
default = true
}
+variable "artifact_source_checksum" {
+ description = "Prometheus release checksum"
+ type = string
+ default = "422dab055ed9c7bcaff52b718705f9192c6fac0de6b7e78dd278e70ee2663dcc"
+}
+
variable "volume_destination" {
description = "Specifies where the volume should be mounted inside the task"
type = string
diff --git a/fdio.infra.terraform/terraform-nomad-prometheus/versions.tf b/fdio.infra.terraform/terraform-nomad-prometheus/versions.tf
index a01708f28a..a319c35908 100644
--- a/fdio.infra.terraform/terraform-nomad-prometheus/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-prometheus/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = ">= 1.4.16"
+ version = ">= 1.4.19"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.3.7"
}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/README.md b/fdio.infra.terraform/terraform-nomad-pyspark-etl/README.md
index 1147ddb16a..d61c8778d4 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/README.md
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/README.md
@@ -3,14 +3,14 @@
| Name | Version |
|------|---------|
-| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.1.4 |
-| <a name="requirement_nomad"></a> [nomad](#requirement\_nomad) | >= 1.4.16 |
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.5.4 |
+| <a name="requirement_nomad"></a> [nomad](#requirement\_nomad) | >= 1.4.20 |
## Providers
| Name | Version |
|------|---------|
-| <a name="provider_nomad"></a> [nomad](#provider\_nomad) | >= 1.4.16 |
+| <a name="provider_nomad"></a> [nomad](#provider\_nomad) | >= 1.4.20 |
## Modules
@@ -20,7 +20,7 @@ No modules.
| Name | Type |
|------|------|
-| [nomad_job.nomad_job_etl](https://registry.terraform.io/providers/hashicorp/nomad/latest/docs/resources/job) | resource |
+| [nomad_job.nomad_job](https://registry.terraform.io/providers/hashicorp/nomad/latest/docs/resources/job) | resource |
## Inputs
@@ -35,7 +35,7 @@ No modules.
| <a name="input_envs"></a> [envs](#input\_envs) | Specifies ETL environment variables. | `list(string)` | `[]` | no |
| <a name="input_image"></a> [image](#input\_image) | Specifies the Docker image to run. | `string` | `"pmikus/docker-ubuntu-focal-aws-glue:latest"` | no |
| <a name="input_job_name"></a> [job\_name](#input\_job\_name) | Specifies a name for the job. | `string` | `"etl"` | no |
-| <a name="input_memory"></a> [memory](#input\_memory) | Specifies the memory required in MB. | `number` | `20000` | no |
+| <a name="input_memory"></a> [memory](#input\_memory) | Specifies the memory required in MB. | `number` | `50000` | no |
| <a name="input_out_aws_access_key_id"></a> [out\_aws\_access\_key\_id](#input\_out\_aws\_access\_key\_id) | AWS access key. | `string` | `"aws"` | no |
| <a name="input_out_aws_default_region"></a> [out\_aws\_default\_region](#input\_out\_aws\_default\_region) | AWS region | `string` | `"aws"` | no |
| <a name="input_out_aws_secret_access_key"></a> [out\_aws\_secret\_access\_key](#input\_out\_aws\_secret\_access\_key) | AWS secret key | `string` | `"aws"` | no |
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-device-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-device-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..cc0b1df8b5
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-device-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_device_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-hoststack-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-hoststack-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..95d7a4c46e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-hoststack-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_hoststack_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-mrr-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-mrr-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..3bab9264fa
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-mrr-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_mrr_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-ndrpdr-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-ndrpdr-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..6142219546
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-ndrpdr-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_ndrpdr_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-reconf-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-reconf-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..b474e75217
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-reconf-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_reconf_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-soak-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-soak-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..0352e1e879
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-coverage-soak-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "coverage_soak_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-hoststack-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-hoststack-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..74478c59f7
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-hoststack-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_hoststack_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-mrr-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-mrr-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..e6bd87b8ed
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-mrr-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_mrr_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-ndrpdr-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-ndrpdr-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..4a40321377
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-ndrpdr-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_ndrpdr_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-reconf-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-reconf-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..670dd37a11
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-reconf-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_reconf_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-soak-rls2402.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-soak-rls2402.hcl.tftpl
new file mode 100644
index 0000000000..c4ad363879
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-iterative-soak-rls2402.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "20g",
+ "--executor-memory", "20g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "iterative_soak_rls2402.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-stats.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-stats.hcl.tftpl
new file mode 100644
index 0000000000..86ca584de7
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-stats.hcl.tftpl
@@ -0,0 +1,53 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "10g",
+ "--executor-memory", "10g",
+ "stats.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-hoststack.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-hoststack.hcl.tftpl
new file mode 100644
index 0000000000..24aa4095d2
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-hoststack.hcl.tftpl
@@ -0,0 +1,53 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "30g",
+ "--executor-memory", "30g",
+ "trending_hoststack.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-mrr.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-mrr.hcl.tftpl
new file mode 100644
index 0000000000..47d6149eed
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-mrr.hcl.tftpl
@@ -0,0 +1,53 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "30g",
+ "--executor-memory", "30g",
+ "trending_mrr.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-ndrpdr.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-ndrpdr.hcl.tftpl
new file mode 100644
index 0000000000..8cd40f537e
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-ndrpdr.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "30g",
+ "--executor-memory", "30g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "trending_ndrpdr.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-soak.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-soak.hcl.tftpl
new file mode 100644
index 0000000000..6d77a898df
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl-trending-soak.hcl.tftpl
@@ -0,0 +1,55 @@
+job "${job_name}" {
+ datacenters = "${datacenters}"
+ type = "${type}"
+ periodic {
+ cron = "${cron}"
+ prohibit_overlap = "${prohibit_overlap}"
+ time_zone = "${time_zone}"
+ }
+ group "${job_name}" {
+ restart {
+ mode = "fail"
+ }
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ operator = "!="
+ value = "arm64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ task "${job_name}" {
+ artifact {
+ source = "git::https://github.com/FDio/csit"
+ destination = "local/csit"
+ }
+ driver = "docker"
+ config {
+ image = "${image}"
+ command = "gluesparksubmit"
+ args = [
+ "--driver-memory", "30g",
+ "--executor-memory", "30g",
+ "--executor-cores", "2",
+ "--master", "local[2]",
+ "trending_soak.py"
+ ]
+ work_dir = "/local/csit/csit.infra.etl"
+ }
+ env {
+ AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
+ AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
+ AWS_DEFAULT_REGION = "${aws_default_region}"
+ OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
+ OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
+ OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
+ ${ envs }
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl.hcl.tftpl
deleted file mode 100644
index 0d0ecfa318..0000000000
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/conf/nomad/etl.hcl.tftpl
+++ /dev/null
@@ -1,322 +0,0 @@
-job "${job_name}" {
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. For a full list of job types and their differences,
- # please see the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "${type}"
-
- # The periodic stanza allows a job to run at fixed times, dates, or intervals.
- # The easiest way to think about the periodic scheduler is "Nomad cron" or
- # "distributed cron".
- #
- # https://www.nomadproject.io/docs/job-specification/periodic
- #
- periodic {
- cron = "${cron}"
- prohibit_overlap = "${prohibit_overlap}"
- time_zone = "${time_zone}"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "${job_name}-master" {
- # The restart stanza configures a tasks's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- mode = "fail"
- }
-
- # The constraint allows restricting the set of eligible nodes. Constraints
- # may filter on attributes or client metadata.
- #
- # For more information and examples on the "volume" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/constraint
- #
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
-
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "${job_name}-trending" {
- # The artifact stanza instructs Nomad to fetch and unpack a remote
- # resource, such as a file, tarball, or binary.
- #
- # https://www.nomadproject.io/docs/job-specification/artifact
- #
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
-
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "30g",
- "--executor-memory", "30g",
- "trending.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- task "${job_name}-stats" {
- # The artifact stanza instructs Nomad to fetch and unpack a remote
- # resource, such as a file, tarball, or binary.
- #
- # https://www.nomadproject.io/docs/job-specification/artifact
- #
- artifact {
- source = "git::https://github.com/FDio/csit"
- destination = "local/csit"
- }
-
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- command = "gluesparksubmit"
- args = [
- "--driver-memory", "10g",
- "--executor-memory", "10g",
- "stats.py"
- ]
- work_dir = "/local/csit/csit.infra.etl"
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
- AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
- AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
- AWS_DEFAULT_REGION = "${aws_default_region}"
- OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
- OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
- OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
- ${ envs }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-# group "${job_name}-rls2206" {
-# # The restart stanza configures a tasks's behavior on task failure. Restarts
-# # happen on the client that is running the task.
-# #
-# # https://www.nomadproject.io/docs/job-specification/restart
-# #
-# restart {
-# mode = "fail"
-# }
-#
-# # The constraint allows restricting the set of eligible nodes. Constraints
-# # may filter on attributes or client metadata.
-# #
-# # For more information and examples on the "volume" stanza, please see
-# # the online documentation at:
-# #
-# # https://www.nomadproject.io/docs/job-specification/constraint
-# #
-# constraint {
-# attribute = "$${attr.cpu.arch}"
-# operator = "!="
-# value = "arm64"
-# }
-#
-# constraint {
-# attribute = "$${node.class}"
-# value = "builder"
-# }
-#
-# # The "task" stanza creates an individual unit of work, such as a Docker
-# # container, web application, or batch processing.
-# #
-# # https://www.nomadproject.io/docs/job-specification/task.html
-# #
-# task "${job_name}-coverage" {
-# # The artifact stanza instructs Nomad to fetch and unpack a remote
-# # resource, such as a file, tarball, or binary.
-# #
-# # https://www.nomadproject.io/docs/job-specification/artifact
-# #
-# artifact {
-# source = "git::https://github.com/FDio/csit"
-# destination = "local/csit"
-# }
-#
-# # The "driver" parameter specifies the task driver that should be used to
-# # run the task.
-# driver = "docker"
-#
-# # The "config" stanza specifies the driver configuration, which is passed
-# # directly to the driver to start the task. The details of configurations
-# # are specific to each driver, so please see specific driver
-# # documentation for more information.
-# config {
-# image = "${image}"
-# command = "gluesparksubmit"
-# args = [
-# "--driver-memory", "20g",
-# "--executor-memory", "20g",
-# "--executor-cores", "2",
-# "--master", "local[2]",
-# "coverage_rls2206.py"
-# ]
-# work_dir = "/local/csit/csit.infra.etl"
-# }
-#
-# # The env stanza configures a list of environment variables to populate
-# # the task's environment before starting.
-# env {
-# AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
-# AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
-# AWS_DEFAULT_REGION = "${aws_default_region}"
-# OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
-# OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
-# OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
-# ${ envs }
-# }
-#
-# # The "resources" stanza describes the requirements a task needs to
-# # execute. Resource requirements include memory, network, cpu, and more.
-# # This ensures the task will execute on a machine that contains enough
-# # resource capacity.
-# #
-# # https://www.nomadproject.io/docs/job-specification/resources
-# #
-# resources {
-# cpu = ${cpu}
-# memory = ${memory}
-# }
-# }
-# task "${job_name}-iterative" {
-# # The artifact stanza instructs Nomad to fetch and unpack a remote
-# # resource, such as a file, tarball, or binary.
-# #
-# # https://www.nomadproject.io/docs/job-specification/artifact
-# #
-# artifact {
-# source = "git::https://github.com/FDio/csit"
-# destination = "local/csit"
-# }
-#
-# # The "driver" parameter specifies the task driver that should be used to
-# # run the task.
-# driver = "docker"
-#
-# # The "config" stanza specifies the driver configuration, which is passed
-# # directly to the driver to start the task. The details of configurations
-# # are specific to each driver, so please see specific driver
-# # documentation for more information.
-# config {
-# image = "${image}"
-# command = "gluesparksubmit"
-# args = [
-# "--driver-memory", "20g",
-# "--executor-memory", "20g",
-# "--executor-cores", "2",
-# "--master", "local[2]",
-# "iterative_rls2206.py"
-# ]
-# work_dir = "/local/csit/csit.infra.etl"
-# }
-#
-# # The env stanza configures a list of environment variables to populate
-# # the task's environment before starting.
-# env {
-# AWS_ACCESS_KEY_ID = "${aws_access_key_id}"
-# AWS_SECRET_ACCESS_KEY = "${aws_secret_access_key}"
-# AWS_DEFAULT_REGION = "${aws_default_region}"
-# OUT_AWS_ACCESS_KEY_ID = "${out_aws_access_key_id}"
-# OUT_AWS_SECRET_ACCESS_KEY = "${out_aws_secret_access_key}"
-# OUT_AWS_DEFAULT_REGION = "${out_aws_default_region}"
-# ${ envs }
-# }
-#
-# # The "resources" stanza describes the requirements a task needs to
-# # execute. Resource requirements include memory, network, cpu, and more.
-# # This ensures the task will execute on a machine that contains enough
-# # resource capacity.
-# #
-# # https://www.nomadproject.io/docs/job-specification/resources
-# #
-# resources {
-# cpu = ${cpu}
-# memory = ${memory}
-# }
-# }
-# }
-}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/main.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/main.tf
index 3d2026f0f9..aac81d9b78 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/main.tf
@@ -6,7 +6,7 @@ data "vault_generic_secret" "fdio_docs" {
path = "kv/secret/data/etl/fdio_docs"
}
-module "etl" {
+module "etl-stats" {
providers = {
nomad = nomad.yul1
}
@@ -18,6 +18,266 @@ module "etl" {
out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
- cron = "@daily"
+ cron = "0 30 0 * * * *"
datacenters = ["yul1"]
+ job_name = "etl-stats"
}
+
+module "etl-trending-hoststack" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-trending-hoststack"
+}
+
+module "etl-trending-mrr" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-trending-mrr"
+ memory = 60000
+}
+
+module "etl-trending-ndrpdr" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-trending-ndrpdr"
+ memory = 60000
+}
+
+module "etl-trending-soak" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+ aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+ aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+ out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+ out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+ out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+ cron = "0 30 0 * * * *"
+ datacenters = ["yul1"]
+ job_name = "etl-trending-soak"
+ memory = 60000
+}
+
+#module "etl-iterative-hoststack-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-iterative-hoststack-rls2402"
+#}
+#
+#module "etl-iterative-mrr-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-iterative-mrr-rls2402"
+#}
+#
+#module "etl-iterative-ndrpdr-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-iterative-ndrpdr-rls2402"
+#}
+#
+#module "etl-iterative-reconf-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-iterative-reconf-rls2402"
+#}
+#
+#module "etl-iterative-soak-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-iterative-soak-rls2402"
+#}
+#
+#module "etl-coverage-device-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-device-rls2402"
+#}
+#
+#module "etl-coverage-hoststack-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-hoststack-rls2402"
+#}
+#
+#module "etl-coverage-mrr-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-mrr-rls2402"
+#}
+#
+#module "etl-coverage-ndrpdr-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-ndrpdr-rls2402"
+#}
+#
+#module "etl-coverage-reconf-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-reconf-rls2402"
+#}
+#
+#module "etl-coverage-soak-rls2402" {
+# providers = {
+# nomad = nomad.yul1
+# }
+# source = "../"
+#
+# aws_access_key_id = data.vault_generic_secret.fdio_logs.data["access_key"]
+# aws_secret_access_key = data.vault_generic_secret.fdio_logs.data["secret_key"]
+# aws_default_region = data.vault_generic_secret.fdio_logs.data["region"]
+# out_aws_access_key_id = data.vault_generic_secret.fdio_docs.data["access_key"]
+# out_aws_secret_access_key = data.vault_generic_secret.fdio_docs.data["secret_key"]
+# out_aws_default_region = data.vault_generic_secret.fdio_docs.data["region"]
+# cron = "0 30 0 * * * *"
+# datacenters = ["yul1"]
+# job_name = "etl-coverage-soak-rls2402"
+#}
+# \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/variables.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/variables.tf
index 31c462632b..db24bdf0fa 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/variables.tf
@@ -7,7 +7,7 @@ variable "nomad_acl" {
variable "nomad_provider_address" {
description = "FD.io Nomad cluster address."
type = string
- default = "http://nomad.service.consul:4646"
+ default = "http://10.30.51.23:4646"
}
variable "nomad_provider_ca_file" {
@@ -31,7 +31,7 @@ variable "nomad_provider_key_file" {
variable "vault_provider_address" {
description = "Vault cluster address."
type = string
- default = "http://vault.service.consul:8200"
+ default = "http://10.30.51.23:8200"
}
variable "vault_provider_skip_tls_verify" {
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/versions.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/versions.tf
index 24e7c94564..0c05e76d65 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/fdio/versions.tf
@@ -1,17 +1,17 @@
terraform {
backend "consul" {
- address = "vault.service.consul:8500"
+ address = "10.30.51.23:8500"
scheme = "http"
path = "terraform/etl"
}
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = ">= 1.4.16"
+ version = ">= 1.4.20"
}
vault = {
- version = ">= 3.2.1"
+ version = ">= 3.12.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/main.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/main.tf
index c477da81a8..cd6a9a52ff 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/main.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/main.tf
@@ -3,9 +3,9 @@ locals {
envs = join("\n", concat([], var.envs))
}
-resource "nomad_job" "nomad_job_etl" {
+resource "nomad_job" "nomad_job" {
jobspec = templatefile(
- "${path.module}/conf/nomad/etl.hcl.tftpl",
+ "${path.module}/conf/nomad/${var.job_name}.hcl.tftpl",
{
aws_access_key_id = var.aws_access_key_id,
aws_secret_access_key = var.aws_secret_access_key,
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/variables.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/variables.tf
index 9357c096f3..f6d318e855 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/variables.tf
@@ -27,7 +27,7 @@ variable "aws_default_region" {
variable "cpu" {
description = "Specifies the CPU required to run this task in MHz."
type = number
- default = 20000
+ default = 10000
}
variable "cron" {
@@ -57,7 +57,7 @@ variable "job_name" {
variable "memory" {
description = "Specifies the memory required in MB."
type = number
- default = 60000
+ default = 50000
}
variable "out_aws_access_key_id" {
diff --git a/fdio.infra.terraform/terraform-nomad-pyspark-etl/versions.tf b/fdio.infra.terraform/terraform-nomad-pyspark-etl/versions.tf
index a01708f28a..f40435fe77 100644
--- a/fdio.infra.terraform/terraform-nomad-pyspark-etl/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-pyspark-etl/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = ">= 1.4.16"
+ version = ">= 1.4.20"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl
new file mode 100644
index 0000000000..28e38a2d0b
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/device-shim.hcl.tftpl
@@ -0,0 +1,78 @@
+job "${job_name}" {
+ datacenters = ["${datacenters}"]
+ type = "system"
+ group "${job_name}-amd" {
+ count = ${group_count}
+ constraint {
+ attribute = "$${node.class}"
+ value = "csit"
+ }
+ restart {
+ interval = "1m"
+ attempts = 3
+ delay = "15s"
+ mode = "delay"
+ }
+ network {
+ port "ssh" {
+ static = 6022
+ }
+ port "ssh2" {
+ static = 6023
+ }
+ }
+ task "${job_name}-amd" {
+ driver = "docker"
+ config {
+ image = "${image_x86_64}"
+ network_mode = "host"
+ pid_mode = "host"
+ volumes = [
+ "/var/run/docker.sock:/var/run/docker.sock"
+ ]
+ privileged = true
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+ group "${job_name}-arm" {
+ count = ${group_count}
+ constraint {
+ attribute = "$${node.class}"
+ value = "csitarm"
+ }
+ restart {
+ interval = "1m"
+ attempts = 3
+ delay = "15s"
+ mode = "delay"
+ }
+ network {
+ port "ssh" {
+ static = 6022
+ }
+ port "ssh2" {
+ static = 6023
+ }
+ }
+ task "${job_name}-arm" {
+ driver = "docker"
+ config {
+ image = "${image_aarch64}"
+ network_mode = "host"
+ pid_mode = "host"
+ volumes = [
+ "/var/run/docker.sock:/var/run/docker.sock"
+ ]
+ privileged = true
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
new file mode 100644
index 0000000000..a6217d781f
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
@@ -0,0 +1,16 @@
+module "vpp-device" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ # nomad
+ datacenters = ["yul1"]
+ job_name = "device-shim"
+ group_count = 1
+ cpu = 1500
+ memory = 4096
+ image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
+ image_x86_64 = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
+}
+
diff --git a/fdio.infra.terraform/1n_nmd/providers.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf
index 92ddb553e7..42a6a45ce0 100644
--- a/fdio.infra.terraform/1n_nmd/providers.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf
@@ -7,7 +7,7 @@ provider "nomad" {
}
provider "vault" {
- address = "http://10.30.51.28:8200"
- skip_tls_verify = true
- token = var.token
+ address = var.vault_provider_address
+ skip_tls_verify = var.vault_provider_skip_tls_verify
+ token = var.vault_provider_token
} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/variables.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf
index 598770eb13..569ba29c87 100644
--- a/fdio.infra.terraform/1n_nmd/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf
@@ -1,5 +1,5 @@
variable "nomad_acl" {
- description = "Nomad ACLs enabled/disabled"
+ description = "Nomad ACLs enabled/disabled."
type = bool
default = false
}
@@ -7,7 +7,7 @@ variable "nomad_acl" {
variable "nomad_provider_address" {
description = "FD.io Nomad cluster address."
type = string
- default = "http://nomad.service.consul:4646"
+ default = "http://10.30.51.23:4646"
}
variable "nomad_provider_ca_file" {
@@ -28,8 +28,20 @@ variable "nomad_provider_key_file" {
default = "/etc/nomad.d/ssl/nomad-cli-key.pem"
}
-variable "token" {
- description = "Vault root token"
+variable "vault_provider_address" {
+ description = "Vault cluster address."
+ type = string
+ default = "http://10.30.51.23:8200"
+}
+
+variable "vault_provider_skip_tls_verify" {
+ description = "Whether to skip verification of the Vault server's TLS certificate."
+ type = bool
+ default = false
+}
+
+variable "vault_provider_token" {
+ description = "Vault root token."
type = string
sensitive = true
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
new file mode 100644
index 0000000000..fc5a3ab12d
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
@@ -0,0 +1,15 @@
+terraform {
+ backend "consul" {
+ address = "10.30.51.23:8500"
+ scheme = "http"
+ path = "terraform/device-csit-shim"
+ }
+ required_providers {
+ nomad = {
+ source = "hashicorp/nomad"
+ version = ">= 1.4.20"
+ }
+ }
+ required_version = ">= 1.5.4"
+}
+
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
new file mode 100644
index 0000000000..b176172d00
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
@@ -0,0 +1,20 @@
+locals {
+ datacenters = join(",", var.datacenters)
+}
+
+resource "nomad_job" "nomad_job" {
+ jobspec = templatefile(
+ "${path.module}/conf/nomad/${var.job_name}.hcl.tftpl",
+ {
+ datacenters = local.datacenters,
+ job_name = var.job_name,
+ group_count = var.group_count,
+ cpu = var.cpu,
+ memory = var.memory,
+ image_aarch64 = var.image_aarch64,
+ image_x86_64 = var.image_x86_64
+ }
+ )
+ detach = false
+}
+
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf
index 401be66f27..0a11e1da3b 100644
--- a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf
@@ -1,42 +1,42 @@
# Nomad
-variable "nomad_datacenters" {
- description = "Nomad data centers"
+variable "datacenters" {
+ description = "Specifies the list of datacenters to be considered when placing this task."
type = list(string)
default = ["dc1"]
}
# CSIT SHIM
-variable "csit_shim_job_name" {
+variable "job_name" {
description = "CSIT SHIM job name"
type = string
default = "prod-csit-shim"
}
-variable "csit_shim_group_count" {
+variable "group_count" {
description = "Number of CSIT SHIM group instances"
type = number
default = 1
}
-variable "csit_shim_cpu" {
+variable "cpu" {
description = "CSIT SHIM task CPU"
type = number
default = 2000
}
-variable "csit_shim_mem" {
+variable "memory" {
description = "CSIT SHIM task memory"
type = number
default = 10000
}
-variable "csit_shim_image_aarch64" {
+variable "image_aarch64" {
description = "CSIT SHIM AARCH64 docker image"
type = string
default = "fdiotools/csit_shim-ubuntu2004:prod-aarch64"
}
-variable "csit_shim_image_x86_64" {
+variable "image_x86_64" {
description = "CSIT SHIM X86_64 docker image"
type = string
default = "fdiotools/csit_shim-ubuntu2004:prod-x86_64"
diff --git a/fdio.infra.terraform/terraform-nomad-loki/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
index a01708f28a..f40435fe77 100644
--- a/fdio.infra.terraform/terraform-nomad-loki/versions.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
@@ -2,8 +2,8 @@ terraform {
required_providers {
nomad = {
source = "hashicorp/nomad"
- version = ">= 1.4.16"
+ version = ">= 1.4.20"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}
diff --git a/fdio.infra.terraform/terraform-openstack-2n/README.md b/fdio.infra.terraform/terraform-openstack-2n/README.md
new file mode 100644
index 0000000000..3ddb0f3789
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/README.md
@@ -0,0 +1,58 @@
+# terraform-openstack-2n-generic
+Terraform module to create 2n-generic topology.
+
+<!-- BEGIN_TF_DOCS -->
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.4.2 |
+| <a name="requirement_openstack"></a> [openstack](#requirement\_openstack) | ~> 1.53.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| <a name="provider_local"></a> [local](#provider\_local) | 2.4.1 |
+| <a name="provider_openstack"></a> [openstack](#provider\_openstack) | 1.53.0 |
+| <a name="provider_template"></a> [template](#provider\_template) | 2.2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| <a name="module_openstack_compute_keypair_v2"></a> [openstack\_compute\_keypair\_v2](#module\_openstack\_compute\_keypair\_v2) | pmikus/compute-keypair-v2/openstack | 1.54.1 |
+| <a name="module_openstack_images_image_v2"></a> [openstack\_images\_image\_v2](#module\_openstack\_images\_image\_v2) | pmikus/images-image-v2/openstack | 1.54.1 |
+| <a name="module_sut1"></a> [sut1](#module\_sut1) | pmikus/compute-instance-v2/openstack | 1.54.1 |
+| <a name="module_tg1"></a> [tg1](#module\_tg1) | pmikus/compute-instance-v2/openstack | 1.54.1 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [local_file.hosts](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [local_file.topology_file](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource |
+| [openstack_networking_port_v2.port_sut1_data1](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [openstack_networking_port_v2.port_sut1_data2](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [openstack_networking_port_v2.port_sut1_mgmt](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [openstack_networking_port_v2.port_tg1_data1](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [openstack_networking_port_v2.port_tg1_data2](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [openstack_networking_port_v2.port_tg1_mgmt](https://registry.terraform.io/providers/terraform-provider-openstack/openstack/latest/docs/resources/networking_port_v2) | resource |
+| [template_cloudinit_config.cloudinit_config_sut1](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/cloudinit_config) | data source |
+| [template_cloudinit_config.cloudinit_config_tg1](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/cloudinit_config) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_flavour_name"></a> [flavour\_name](#input\_flavour\_name) | (Optional; Required if flavor\_id is empty) The name of the desired flavor for the server. Changing this resizes the existing server. | `string` | n/a | yes |
+| <a name="input_network_id_data"></a> [network\_id\_data](#input\_network\_id\_data) | (Required) The ID of the network to attach the port to. Changing this creates a new port. | `string` | n/a | yes |
+| <a name="input_network_id_mgmt"></a> [network\_id\_mgmt](#input\_network\_id\_mgmt) | (Required) The ID of the network to attach the port to. Changing this creates a new port. | `string` | n/a | yes |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_sut_id"></a> [sut\_id](#output\_sut\_id) | SUT VM ID. |
+| <a name="output_tg_id"></a> [tg\_id](#output\_tg\_id) | TG VM ID. |
+<!-- END_TF_DOCS -->
diff --git a/fdio.infra.terraform/terraform-openstack-2n/hosts.tftpl b/fdio.infra.terraform/terraform-openstack-2n/hosts.tftpl
new file mode 100644
index 0000000000..cb36dbb138
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/hosts.tftpl
@@ -0,0 +1,8 @@
+all:
+ children:
+ tg:
+ hosts:
+ ${tg_public_ip}
+ sut:
+ hosts:
+ ${dut1_public_ip} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-openstack-2n/main.tf b/fdio.infra.terraform/terraform-openstack-2n/main.tf
new file mode 100644
index 0000000000..f306933a81
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/main.tf
@@ -0,0 +1,211 @@
+locals {
+ image_name = "Ubuntu 22.04.2 LTS"
+ image_source_url = "http://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
+ resource_prefix = "csit-2n"
+ testbed_name = "xu6n"
+ topology_name = "2n"
+}
+
+# Create Cloud-Init config for TG.
+data "template_cloudinit_config" "cloudinit_config_tg1" {
+ gzip = false
+ base64_encode = false
+
+ part {
+ content_type = "text/cloud-config"
+ content = templatefile(
+ "${path.module}/user-data-tg1", {}
+ )
+ }
+}
+
+# Create Cloud-Init config for SUT1.
+data "template_cloudinit_config" "cloudinit_config_sut1" {
+ gzip = false
+ base64_encode = false
+
+ part {
+ content_type = "text/cloud-config"
+ content = templatefile(
+ "${path.module}/user-data-sut1", {}
+ )
+ }
+}
+
+# Create OpenStack Image.
+module "openstack_images_image_v2" {
+ source = "pmikus/images-image-v2/openstack"
+ version = "1.54.1"
+
+ image_source_url = local.image_source_url
+ name = local.image_name
+}
+
+# Create OpenStack Keypair.
+module "openstack_compute_keypair_v2" {
+ source = "pmikus/compute-keypair-v2/openstack"
+ version = "1.54.1"
+
+ name = "${local.resource_prefix}-keypair"
+}
+
+
+# Create management port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_tg1_mgmt" {
+ admin_state_up = true
+ fixed_ip {
+ ip_address = "10.21.152.2"
+ subnet_id = "b1f9573d-4c2e-45da-bbac-cb3f191ab0f5"
+ }
+ name = "${local.resource_prefix}-tg1-mgmt-port"
+ network_id = var.network_id_mgmt
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "normal"
+ }
+}
+
+# Create data port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_tg1_data1" {
+ admin_state_up = false
+ name = "${local.resource_prefix}-tg1-data1-port"
+ network_id = var.network_id_data
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "direct"
+ }
+}
+
+# Create data port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_tg1_data2" {
+ admin_state_up = false
+ name = "${local.resource_prefix}-tg1-data2-port"
+ network_id = var.network_id_data
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "direct"
+ }
+}
+
+# Create TG instance.
+module "tg1" {
+ depends_on = [
+ module.openstack_compute_keypair_v2,
+ module.openstack_images_image_v2
+ ]
+
+ source = "pmikus/compute-instance-v2/openstack"
+ version = "1.54.1"
+
+ flavour_name = var.flavour_name
+ image_id = module.openstack_images_image_v2.id
+ key_pair = module.openstack_compute_keypair_v2.name
+ name = "${local.resource_prefix}-tg1"
+ networks = {
+ "platform-shared-port" = openstack_networking_port_v2.port_tg1_mgmt.id
+ "data-playground-port1" = openstack_networking_port_v2.port_tg1_data1.id
+ "data-playground-port2" = openstack_networking_port_v2.port_tg1_data2.id
+ }
+ user_data = data.template_cloudinit_config.cloudinit_config_tg1.rendered
+}
+
+# Create management port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_sut1_mgmt" {
+ admin_state_up = true
+ fixed_ip {
+ ip_address = "10.21.152.3"
+ subnet_id = "b1f9573d-4c2e-45da-bbac-cb3f191ab0f5"
+ }
+ name = "${local.resource_prefix}-sut1-mgmt-port"
+ network_id = var.network_id_mgmt
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "normal"
+ }
+}
+
+# Create data port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_sut1_data1" {
+ admin_state_up = false
+ name = "${local.resource_prefix}-sut1-data1-port"
+ network_id = var.network_id_data
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "direct"
+ }
+}
+
+# Create data port in dedicated subnet.
+resource "openstack_networking_port_v2" "port_sut1_data2" {
+ admin_state_up = false
+ name = "${local.resource_prefix}-sut1-data2-port"
+ network_id = var.network_id_data
+ port_security_enabled = false
+
+ binding {
+ vnic_type = "direct"
+ }
+}
+
+# Create SUT instance.
+module "sut1" {
+ depends_on = [
+ module.openstack_compute_keypair_v2,
+ module.openstack_images_image_v2
+ ]
+
+ source = "pmikus/compute-instance-v2/openstack"
+ version = "1.54.1"
+
+ flavour_name = var.flavour_name
+ image_id = module.openstack_images_image_v2.id
+ key_pair = module.openstack_compute_keypair_v2.name
+ name = "${local.resource_prefix}-sut1"
+ networks = {
+ "platform-shared-port" = openstack_networking_port_v2.port_sut1_mgmt.id
+ "data-playground-port1" = openstack_networking_port_v2.port_sut1_data1.id
+ "data-playground-port2" = openstack_networking_port_v2.port_sut1_data2.id
+ }
+ user_data = data.template_cloudinit_config.cloudinit_config_sut1.rendered
+}
+
+resource "local_file" "topology_file" {
+ depends_on = [
+ module.tg1,
+ module.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/topology-${local.topology_name}.tftpl",
+ {
+ tg_if1_mac = openstack_networking_port_v2.port_tg1_data1.mac_address
+ tg_if2_mac = openstack_networking_port_v2.port_tg1_data2.mac_address
+ dut1_if1_mac = openstack_networking_port_v2.port_sut1_data1.mac_address
+ dut1_if2_mac = openstack_networking_port_v2.port_sut1_data2.mac_address
+ tg_public_ip = openstack_networking_port_v2.port_tg1_mgmt.fixed_ip[0].ip_address
+ dut1_public_ip = openstack_networking_port_v2.port_sut1_mgmt.fixed_ip[0].ip_address
+ }
+ )
+ filename = "${path.module}/${local.topology_name}-x-${local.testbed_name}.yaml"
+}
+
+resource "local_file" "hosts" {
+ depends_on = [
+ module.tg1,
+ module.sut1
+ ]
+
+ content = templatefile(
+ "${path.module}/hosts.tftpl",
+ {
+ tg_public_ip = openstack_networking_port_v2.port_tg1_mgmt.fixed_ip[0].ip_address
+ dut1_public_ip = openstack_networking_port_v2.port_sut1_mgmt.fixed_ip[0].ip_address
+ }
+ )
+ filename = "${path.module}/hosts.yaml"
+}
diff --git a/fdio.infra.terraform/terraform-openstack-2n/outputs.tf b/fdio.infra.terraform/terraform-openstack-2n/outputs.tf
new file mode 100644
index 0000000000..f8985685f0
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/outputs.tf
@@ -0,0 +1,9 @@
+output "tg_id" {
+ description = "TG VM ID."
+ value = module.tg1.id
+}
+
+output "sut_id" {
+ description = "SUT VM ID."
+ value = module.sut1.id
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-openstack-2n/topology-2n.tftpl b/fdio.infra.terraform/terraform-openstack-2n/topology-2n.tftpl
new file mode 100644
index 0000000000..1129a6f8f3
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/topology-2n.tftpl
@@ -0,0 +1,59 @@
+---
+metadata:
+ version: 0.1
+ schema:
+ - resources/topology_schemas/2_node_topology.sch.yaml
+ - resources/topology_schemas/topology.sch.yaml
+ tags: [hw, 2-node]
+
+nodes:
+ TG:
+ type: TG
+ subtype: TREX
+ host: "${tg_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ interfaces:
+ port1:
+ # tg_instance/p1 - 100GE port1 on E810 NIC.
+ mac_address: "${tg_if1_mac}"
+ pci_address: "0000:00:05.0"
+ ip4_address: "172.16.10.2"
+ driver: iavf
+ link: link1
+ model: Intel-E810
+ port2:
+ # tg_instance/p2 - 100GE port2 on E810 NIC.
+ mac_address: "${tg_if2_mac}"
+ pci_address: "0000:00:06.0"
+ ip4_address: "172.16.20.2"
+ driver: iavf
+ link: link2
+ model: Intel-E810
+ DUT1:
+ type: DUT
+ host: "${dut1_public_ip}"
+ arch: x86_64
+ port: 22
+ username: testuser
+ password: Csit1234
+ uio_driver: vfio-pci
+ interfaces:
+ port1:
+ # dut1_instance/p1 - 100GE port1 on E810 NIC.
+ mac_address: "${dut1_if1_mac}"
+ pci_address: "0000:00:05.0"
+ ip4_address: "172.16.10.1"
+ driver: iavf
+ link: link1
+ model: Intel-E810
+ port2:
+ # dut1_instance/p2 - 100GE port2 on E810 NIC.
+ mac_address: "${dut1_if2_mac}"
+ pci_address: "0000:00:06.0"
+ ip4_address: "172.16.20.1"
+ driver: iavf
+ link: link2
+ model: Intel-E810 \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-openstack-2n/user-data-sut1 b/fdio.infra.terraform/terraform-openstack-2n/user-data-sut1
new file mode 100644
index 0000000000..9838f1b165
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/user-data-sut1
@@ -0,0 +1,66 @@
+#cloud-config
+apt:
+ sources:
+ docker.list:
+ source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable"
+ keyid: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
+groups:
+ - "docker"
+hostname: "s02-t21-sut1"
+package_update: true
+packages:
+ - "apt-transport-https"
+ - "autoconf"
+ - "build-essential"
+ - "ca-certificates"
+ - "cgroup-tools"
+ - "curl"
+ - "dkms"
+ - "docker-ce"
+ - "docker-ce-cli"
+ - "gdb"
+ - "gnupg-agent"
+ - "iperf3"
+ - "libglib2.0-dev"
+ - "libmbedcrypto7"
+ - "libmbedtls14"
+ - "libmbedx509-1"
+ - "libnuma-dev"
+ - "libpixman-1-dev"
+ - "libpcap-dev"
+ - "libtool"
+ - "linux-tools-common"
+ - "lxc"
+ - "net-tools"
+ - "ninja-build"
+ - "numactl"
+ - "pkg-config"
+ - "python3-all"
+ - "python3-apt"
+ - "python3-cffi"
+ - "python3-cffi-backend"
+ - "python3-dev"
+ - "python3-pip"
+ - "python3-pyelftools"
+ - "python3-setuptools"
+ - "qemu-system"
+ - "screen"
+ - "socat"
+ - "software-properties-common"
+ - "unzip"
+ - "virtualenv"
+runcmd:
+ - sed -i '/PermitRootLogin/d' /etc/ssh/sshd_config
+ - echo "PermitRootLogin no" >> /etc/ssh/sshd_config
+ - systemctl restart sshd
+ - systemctl start docker
+ - systemctl enable docker
+ssh_pwauth: True
+users:
+ - name: "testuser"
+ groups: users, admin, docker, sudo
+ shell: "/bin/bash"
+ sudo:
+ - ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ passwd: "$6$Y62lhMGJD8YNzmJn$H4DSqjrwFp5WN3tOvIrF.g/G2duOV76zXHAmaA/RU8jfT8H8sDumLQe/Q.EmI5pjPv7dzgI8j9BQPWes7efBK0" \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-openstack-2n/user-data-tg1 b/fdio.infra.terraform/terraform-openstack-2n/user-data-tg1
new file mode 100644
index 0000000000..59f6a481db
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/user-data-tg1
@@ -0,0 +1,71 @@
+#cloud-config
+apt:
+ sources:
+ docker.list:
+ source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable"
+ keyid: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
+groups:
+ - "docker"
+hostname: "s01-t21-tg"
+package_update: true
+packages:
+ - "apt-transport-https"
+ - "autoconf"
+ - "build-essential"
+ - "ca-certificates"
+ - "cgroup-tools"
+ - "curl"
+ - "dkms"
+ - "docker-ce"
+ - "docker-ce-cli"
+ - "gcc-9"
+ - "g++-9"
+ - "gnupg-agent"
+ - "iperf3"
+ - "libmnl-dev"
+ - "libnuma-dev"
+ - "libpcap-dev"
+ - "librdmacm-dev"
+ - "librdmacm1"
+ - "libssl-dev"
+ - "linux-tools-common"
+ - "net-tools"
+ - "ninja-build"
+ - "numactl"
+ - "pciutils"
+ - "python3-all"
+ - "python3-apt"
+ - "python3-cffi"
+ - "python3-cffi-backend"
+ - "python3-dev"
+ - "python3-pip"
+ - "python3-pyelftools"
+ - "python3-setuptools"
+ - "qemu-system"
+ - "socat"
+ - "software-properties-common"
+ - "unzip"
+ - "virtualenv"
+ - "zlib1g-dev"
+runcmd:
+ - sed -i '/PermitRootLogin/d' /etc/ssh/sshd_config
+ - echo "PermitRootLogin no" >> /etc/ssh/sshd_config
+ - systemctl restart sshd
+ - systemctl start docker
+ - systemctl enable docker
+ - curl --proxy "http://[2620:0:cc8:11::1]:8888" -L http://github.com/cisco-system-traffic-generator/trex-core/archive/v3.03.tar.gz -o /opt/trex-core-v3.03.tar.gz
+ - mkdir -p /opt/trex-core-v3.03
+ - tar xzf /opt/trex-core-v3.03.tar.gz -C /opt/trex-core-v3.03 --strip-components=1
+ - cd /opt/trex-core-v3.03/linux_dpdk && ./b configure
+ - cd /opt/trex-core-v3.03/linux_dpdk && ./b build
+ - cd /opt/trex-core-v3.03/scripts/ko/src && make
+ - cd /opt/trex-core-v3.03/scripts/ko/src && make install
+ssh_pwauth: True
+users:
+ - name: "testuser"
+ groups: users, admin, docker, sudo
+ shell: "/bin/bash"
+ sudo:
+ - ALL=(ALL) NOPASSWD:ALL
+ lock_passwd: false
+ passwd: "$6$Y62lhMGJD8YNzmJn$H4DSqjrwFp5WN3tOvIrF.g/G2duOV76zXHAmaA/RU8jfT8H8sDumLQe/Q.EmI5pjPv7dzgI8j9BQPWes7efBK0"
diff --git a/fdio.infra.terraform/terraform-openstack-2n/variables.tf b/fdio.infra.terraform/terraform-openstack-2n/variables.tf
new file mode 100644
index 0000000000..d761016699
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/variables.tf
@@ -0,0 +1,14 @@
+variable "flavour_name" {
+ description = "(Optional; Required if flavor_id is empty) The name of the desired flavor for the server. Changing this resizes the existing server."
+ type = string
+}
+
+variable "network_id_data" {
+ description = "(Required) The ID of the network to attach the port to. Changing this creates a new port."
+ type = string
+}
+
+variable "network_id_mgmt" {
+ description = "(Required) The ID of the network to attach the port to. Changing this creates a new port."
+ type = string
+}
diff --git a/fdio.infra.terraform/terraform-openstack-2n/versions.tf b/fdio.infra.terraform/terraform-openstack-2n/versions.tf
new file mode 100644
index 0000000000..1ad4a215b5
--- /dev/null
+++ b/fdio.infra.terraform/terraform-openstack-2n/versions.tf
@@ -0,0 +1,9 @@
+terraform {
+ required_providers {
+ openstack = {
+ source = "terraform-provider-openstack/openstack"
+ version = "~> 1.53.0"
+ }
+ }
+ required_version = ">= 1.4.2"
+} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/variables.tf b/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/variables.tf
index ed4ecc007d..d3d728a49d 100644
--- a/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/variables.tf
+++ b/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/variables.tf
@@ -1,17 +1,17 @@
variable "vault_provider_address" {
description = "Vault cluster address."
type = string
- default = "vault.service.consul:8200"
+ default = "http://10.30.51.23:8200"
}
variable "vault_provider_skip_tls_verify" {
- description = "Verification of the Vault server's TLS certificate"
+ description = "Whether to skip verification of the Vault server's TLS certificate."
type = bool
default = false
}
variable "vault_provider_token" {
- description = "Vault root token"
+ description = "Vault root token."
type = string
sensitive = true
}
diff --git a/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/versions.tf b/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/versions.tf
index 2b62d8d1ee..4c93000093 100644
--- a/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/versions.tf
+++ b/fdio.infra.terraform/terraform-vault-aws-secret-backend/fdio/versions.tf
@@ -1,13 +1,13 @@
terraform {
backend "consul" {
- address = "consul.service.consul:8500"
+ address = "10.30.51.23:8500"
scheme = "http"
path = "terraform/aws-secret-backend"
}
required_providers {
vault = {
- version = ">= 3.2.1"
+ version = ">= 3.12.0"
}
}
- required_version = ">= 1.1.4"
+ required_version = ">= 1.5.4"
}