34 files changed, 1098 insertions, 1294 deletions
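Almost all of the churn below is whitespace-only realignment of HCL arguments — the canonical layout produced by `terraform fmt` (an assumption; the command is not named anywhere in the diff) — so many before/after line pairs differ only in spacing that plain text does not make visible. The convention, sketched on a hypothetical block:

```hcl
# A sketch of the canonical "terraform fmt" layout (assumed to be what this
# change applies): the "=" of consecutive arguments in a block is padded into
# one column, and list literals lose the padding inside their brackets.
resource "nomad_job" "example" { # hypothetical resource, for illustration only
  jobspec = data.template_file.example.rendered
  detach  = false # extra space after "detach" aligns the "=" signs
}

variable "nomad_datacenters" {
  type    = list(string)
  default = ["dc1"] # before formatting this was written [ "dc1" ]
}
```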
diff --git a/fdio.infra.terraform/.gitignore b/fdio.infra.terraform/.gitignore
index 5084941394..5c864924c0 100644
--- a/fdio.infra.terraform/.gitignore
+++ b/fdio.infra.terraform/.gitignore
@@ -2,8 +2,8 @@
 **/.terraform/*
 
 # .tfstate files
-*.tfstate
-*.tfstate.*
+#*.tfstate
+#*.tfstate.*
 
 # Crash log files
 crash.log
diff --git a/fdio.infra.terraform/1n_nmd/.terraform.lock.hcl b/fdio.infra.terraform/1n_nmd/.terraform.lock.hcl
deleted file mode 100644
index 3a2e4ef85f..0000000000
--- a/fdio.infra.terraform/1n_nmd/.terraform.lock.hcl
+++ /dev/null
@@ -1,58 +0,0 @@
-# This file is maintained automatically by "terraform init".
-# Manual edits may be lost in future updates.
-
-provider "registry.terraform.io/hashicorp/nomad" {
-  version     = "1.4.11"
-  constraints = "~> 1.4.9"
-  hashes = [
-    "h1:ElEvgyMfWoWyQbB6c51rGTjQlZKWf3QOvf5NhX/Vuyw=",
-    "zh:150d0ab25241a42f2ac5008878e0106c0887eec15181a40bee1433b87f01b8ed",
-    "zh:1d4ccda0729f12060e7f4ce5c6d83042d6d38ba2e546b68722ccb74832793b0c",
-    "zh:2964652181f59097aa1126f4b215b9232702b1a56df3e017e6b5683d5615714b",
-    "zh:42843e68bca24280e84ec600ae5d8f085fb26bdcdb4c0ccff2139ed81e1cb8c1",
-    "zh:4c6d90d40a360d84bc84c9af35c64056237537fa0f8118bf890fcf4e71f7b0f6",
-    "zh:51771ce42a99d7d4f5a4306367eee4cea72391c07f1f1c55c3c4a5c6a9eca53d",
-    "zh:6ab2389f1be6bb39d4966c253bf4fc77348e90f7e07ed2abb5ec5c90a4bbb615",
-    "zh:9b109254ea7ca6a5b0ede33b406cf5fed779f05672891bbd1cc3255c9cb17663",
-    "zh:a38c929d4fd03193cce94178c0fbaa1f7f09e93223ac71dc77c834d429b1c7c9",
-    "zh:bdc9bc10a1ecb5ae3da651df1709bf9d5474f25e310b73bdf32c86417674d32b",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/template" {
-  version     = "2.1.2"
-  constraints = "~> 2.1.2"
-  hashes = [
-    "h1:8NcPRk3yxQtUlAT/YGfjBEJ76rQI2ljARYeIEjhtWho=",
-    "zh:149e4bf47ac21b67f6567767afcd29caaf0b0ca43714748093a00a2a98cd17a8",
-    "zh:2ff61a5eb7550e0df2baefccea78a8b621faef76154aad7ddf9c85c1d69f7ebf",
-    "zh:3b2d9a9f80754eb0a250a80e0dfdef385501697850a54ead744d1615e60fe648",
-    "zh:545b93c818035aac59f4a821644276c123a74aa210b1221974d832a6009df201",
-    "zh:5508512a522152a302591b399512fa736d8f57088c85ca74f7e00014db3a8c26",
-    "zh:701b56016a6db814ade171877375a2429b45979f97c2d112e4f2103f0433eb08",
-    "zh:90fc08165958538d8a099f17282c615d5b13f86bb215af33e2ca7551bf81996f",
-    "zh:affa6d409060c01a610854a395970d76701d0b07696e1ed6776b3f3b58014104",
-    "zh:b66ffed670bf0ed6714fa4ac26444a8e22f71ec6da134faf0b1f77fb2c13c666",
-    "zh:bb3d87db22f0ac56717eadde39690e3e27c1c01b10d0ecbe2e6e39f1e5c4d808",
-    "zh:c54b9693c9f348591432aabc808cbe1786bcda1cb70d312ef62a24545a14f945",
-    "zh:e7c8f8506cee5fa28f842714857d412a2b09e61127a0efe2a164c2f3d9bf2619",
-  ]
-}
-
-provider "registry.terraform.io/hashicorp/vault" {
-  version     = "2.16.0"
-  constraints = ">= 2.14.0"
-  hashes = [
-    "h1:h27r8aZ5nwRfEelTQnJoA8s3TndJYPI7+3Df1DXIhXk=",
-    "zh:13dde74fac618ee0281bad60a60966a85d4a59c8420b15fd6499996fa1bc99b3",
-    "zh:1daad9da6c82f43cbd07bf1cfedf3c6960fb2f96bc59f94fd75d361065b8c51a",
-    "zh:68075d8e1824b745267ce9e4ef693b202b9282561811de6ccf7298935f482128",
-    "zh:86df4a4405413d575cd72985483163e62539afbd659fddef59fc637875b707e2",
-    "zh:8f8306ada4c1c44945ce5205e4f1cfbf5e3d46a9da2f3a1d0be17d32e4935845",
-    "zh:9eb75febcd6fcca9885a6f5e93293a200b2effbe31f47d265cc4d1346d42d29e",
-    "zh:a658b55b239bc7ad59a2bf55e7abbfe5f0111d37dd68b5d4bb947eee93969092",
-    "zh:af10679c241bd0e0168f57c24e839fd24c747f3e84b7bb6de3cd791471709249",
-    "zh:ee3030f36846de45450be088aa4c2b1f69246b2ecf40d7ea6a15a7f09ac5e5d0",
-    "zh:efe6cc23f77336604358e627b0b565c1421a97376e510a9cdaaf849524944713",
-  ]
-}
diff --git a/fdio.infra.terraform/1n_nmd/alertmanager/main.tf b/fdio.infra.terraform/1n_nmd/alertmanager/main.tf
index 9525aabc0c..b7ab5dce92 100644
--- a/fdio.infra.terraform/1n_nmd/alertmanager/main.tf
+++ b/fdio.infra.terraform/1n_nmd/alertmanager/main.tf
@@ -1,5 +1,5 @@
 locals {
-  datacenters = join(",", var.nomad_datacenters)
+  datacenters = join(",", var.nomad_datacenters)
 
   alertmanager_url = join("",
     [
@@ -12,8 +12,8 @@ locals {
 }
 
 data "template_file" "nomad_job_alertmanager" {
-  template = file("${path.module}/conf/nomad/alertmanager.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/alertmanager.hcl")
+  vars = {
     datacenters = local.datacenters
     url         = local.alertmanager_url
     job_name    = var.alertmanager_job_name
@@ -35,6 +35,6 @@ data "template_file" "nomad_job_alertmanager" {
 }
 
 resource "nomad_job" "nomad_job_alertmanager" {
-  jobspec = data.template_file.nomad_job_alertmanager.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_alertmanager.rendered
+  detach  = false
 }
\ No newline at end of file
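This render-then-submit pattern repeats in every module touched below (grafana, minio, nginx, prometheus, vpp_device): a `template_file` data source fills in an HCL jobspec, and `nomad_job` submits the rendered result. A minimal self-contained sketch, with a hypothetical job and file name:

```hcl
# Minimal sketch of the pipeline used by these modules; "example.hcl" and the
# vars are hypothetical placeholders, not names from this repository.
data "template_file" "nomad_job_example" {
  template = file("${path.module}/conf/nomad/example.hcl")
  vars = {
    datacenters = join(",", ["yul1"]) # substituted into the jobspec template
    job_name    = "prod-example"
  }
}

resource "nomad_job" "nomad_job_example" {
  # With detach = false the provider waits for the job deployment instead of
  # returning as soon as the job is registered, so a broken job fails the
  # terraform apply rather than passing silently.
  jobspec = data.template_file.nomad_job_example.rendered
  detach  = false
}
```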
diff --git a/fdio.infra.terraform/1n_nmd/alertmanager/variables.tf b/fdio.infra.terraform/1n_nmd/alertmanager/variables.tf
index ffedf24f3d..e24ceb64c6 100644
--- a/fdio.infra.terraform/1n_nmd/alertmanager/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/alertmanager/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 # Alermanager
@@ -38,7 +38,7 @@ variable "alertmanager_use_canary" {
 
 variable "alertmanager_vault_secret" {
   description = "Set of properties to be able to fetch secret from vault"
-  type = object({
+  type        = object({
     use_vault_provider = bool,
     vault_kv_policy_name = string,
     vault_kv_path = string,
diff --git a/fdio.infra.terraform/1n_nmd/aws/main.tf b/fdio.infra.terraform/1n_nmd/aws/main.tf
index 8084eb4e33..be7eb7c577 100644
--- a/fdio.infra.terraform/1n_nmd/aws/main.tf
+++ b/fdio.infra.terraform/1n_nmd/aws/main.tf
@@ -1,18 +1,18 @@
 resource "vault_aws_secret_backend" "aws" {
-  access_key = var.aws_access_key
-  secret_key = var.aws_secret_key
-  path = "${var.name}-path"
+  access_key                = var.aws_access_key
+  secret_key                = var.aws_secret_key
+  path                      = "${var.name}-path"
 
   default_lease_ttl_seconds = "43200"
   max_lease_ttl_seconds     = "43200"
 }
 
 resource "vault_aws_secret_backend_role" "admin" {
-  backend = vault_aws_secret_backend.aws.path
-  name = "${var.name}-role"
-  credential_type = "iam_user"
+  backend         = vault_aws_secret_backend.aws.path
+  name            = "${var.name}-role"
+  credential_type = "iam_user"
 
-  policy_document = <<EOF
+  policy_document = <<EOF
 {
   "Version": "2012-10-17",
   "Statement": [
@@ -29,9 +29,9 @@ EOF
 }
 
 output "backend" {
-  value = vault_aws_secret_backend.aws.path
+  value = vault_aws_secret_backend.aws.path
 }
 
 output "role" {
-  value = vault_aws_secret_backend_role.admin.name
+  value = vault_aws_secret_backend_role.admin.name
 }
diff --git a/fdio.infra.terraform/1n_nmd/aws/providers.tf b/fdio.infra.terraform/1n_nmd/aws/providers.tf
index 49922fd78f..9bcd95ec57 100644
--- a/fdio.infra.terraform/1n_nmd/aws/providers.tf
+++ b/fdio.infra.terraform/1n_nmd/aws/providers.tf
@@ -1,7 +1,7 @@
 terraform {
   required_providers {
-    vault = {
-      version = ">=2.22.1"
+    vault = {
+      version = ">=2.22.1"
     }
   }
   required_version = ">= 1.0.3"
diff --git a/fdio.infra.terraform/1n_nmd/aws/variables.tf b/fdio.infra.terraform/1n_nmd/aws/variables.tf
index 4e5f61f238..ee9fb73a73 100644
--- a/fdio.infra.terraform/1n_nmd/aws/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/aws/variables.tf
@@ -1,11 +1,11 @@
 variable "aws_access_key" {
-  sensitive = true
+  sensitive = true
 }
 
 variable "aws_secret_key" {
-  sensitive = true
+  sensitive = true
 }
 
 variable "name" {
-  default = "dynamic-aws-creds-vault-admin"
+  default = "dynamic-aws-creds-vault-admin"
 }
\ No newline at end of file
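The `aws` module above is the producer half of a dynamic-credentials flow: it mounts an AWS secret engine in Vault and defines an IAM role against it. The consumer half appears later in this same diff (`2n_aws_c5n/deploy/main.tf`), where a data source asks Vault to mint short-lived keys from that path/role pair. Condensed:

```hcl
# Producer (the aws module above): mount the engine and attach a role.
resource "vault_aws_secret_backend" "aws" {
  access_key = var.aws_access_key # long-lived root credentials, held by Vault
  secret_key = var.aws_secret_key
  path       = "${var.name}-path"
}

# Consumer (the deploy modules below): each read returns freshly issued,
# short-lived IAM credentials for that role.
data "vault_aws_access_credentials" "creds" {
  backend = "${var.name}-path"
  role    = "${var.name}-role"
}

provider "aws" {
  region     = "eu-central-1"
  access_key = data.vault_aws_access_credentials.creds.access_key
  secret_key = data.vault_aws_access_credentials.creds.secret_key
}
```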
diff --git a/fdio.infra.terraform/1n_nmd/grafana/main.tf b/fdio.infra.terraform/1n_nmd/grafana/main.tf
index b67ba03985..e7cc1955c4 100644
--- a/fdio.infra.terraform/1n_nmd/grafana/main.tf
+++ b/fdio.infra.terraform/1n_nmd/grafana/main.tf
@@ -3,8 +3,8 @@ locals {
 }
 
 data "template_file" "nomad_job_grafana" {
-  template = file("${path.module}/conf/nomad/grafana.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/grafana.hcl")
+  vars = {
     datacenters = local.datacenters
     job_name    = var.grafana_job_name
     use_canary  = var.grafana_use_canary
@@ -19,6 +19,6 @@ data "template_file" "nomad_job_grafana" {
 }
 
 resource "nomad_job" "nomad_job_grafana" {
-  jobspec = data.template_file.nomad_job_grafana.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_grafana.rendered
+  detach  = false
 }
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/grafana/variables.tf b/fdio.infra.terraform/1n_nmd/grafana/variables.tf
index 0c2382b16a..88f8bdade1 100644
--- a/fdio.infra.terraform/1n_nmd/grafana/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/grafana/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 # Grafana
@@ -38,7 +38,7 @@ variable "grafana_use_canary" {
 
 variable "grafana_vault_secret" {
   description = "Set of properties to be able to fetch secret from vault"
-  type = object({
+  type        = object({
     use_vault_provider = bool,
     vault_kv_policy_name = string,
     vault_kv_path = string,
diff --git a/fdio.infra.terraform/1n_nmd/main.tf b/fdio.infra.terraform/1n_nmd/main.tf
index d48f12a046..a8a1bb9315 100644
--- a/fdio.infra.terraform/1n_nmd/main.tf
+++ b/fdio.infra.terraform/1n_nmd/main.tf
@@ -5,24 +5,24 @@
 # and have them automatically associated with the root provider
 # configurations.
 module "alertmanager" {
-  source = "./alertmanager"
-  providers = {
+  source    = "./alertmanager"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
+  nomad_datacenters = ["yul1"]
 
   # alertmanager
-  alertmanager_job_name = "prod-alertmanager"
-  alertmanager_use_canary = true
-  alertmanager_group_count = 1
-  alertmanager_vault_secret = {
-    use_vault_provider = false,
-    vault_kv_policy_name = "kv-secret",
-    vault_kv_path = "secret/data/prometheus",
-    vault_kv_field_access_key = "access_key",
-    vault_kv_field_secret_key = "secret_key"
+  alertmanager_job_name     = "prod-alertmanager"
+  alertmanager_use_canary   = true
+  alertmanager_group_count  = 1
+  alertmanager_vault_secret = {
+    use_vault_provider        = false,
+    vault_kv_policy_name      = "kv-secret",
+    vault_kv_path             = "secret/data/prometheus",
+    vault_kv_field_access_key = "access_key",
+    vault_kv_field_secret_key = "secret_key"
   }
   alertmanager_version = "0.21.0"
   alertmanager_cpu     = 1000
@@ -35,131 +35,131 @@ module "alertmanager" {
 }
 
 module "grafana" {
-  source = "./grafana"
-  providers = {
+  source    = "./grafana"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
+  nomad_datacenters = ["yul1"]
 
   # grafana
-  grafana_job_name = "prod-grafana"
-  grafana_use_canary = true
-  grafana_group_count = 1
-  grafana_vault_secret = {
-    use_vault_provider = false,
-    vault_kv_policy_name = "kv-secret",
-    vault_kv_path = "secret/data/grafana",
-    vault_kv_field_access_key = "access_key",
-    vault_kv_field_secret_key = "secret_key"
+  grafana_job_name     = "prod-grafana"
+  grafana_use_canary   = true
+  grafana_group_count  = 1
+  grafana_vault_secret = {
+    use_vault_provider        = false,
+    vault_kv_policy_name      = "kv-secret",
+    vault_kv_path             = "secret/data/grafana",
+    vault_kv_field_access_key = "access_key",
+    vault_kv_field_secret_key = "secret_key"
   }
-  grafana_container_image = "grafana/grafana:7.3.7"
-  grafana_cpu = 1000
-  grafana_mem = 2048
-  grafana_port = 3000
+  grafana_container_image = "grafana/grafana:7.3.7"
+  grafana_cpu             = 1000
+  grafana_mem             = 2048
+  grafana_port            = 3000
 }
 
 module "minio" {
-  source = "./minio"
-  providers = {
+  source    = "./minio"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
-  nomad_host_volume = "prod-volume-data1-1"
+  nomad_datacenters = ["yul1"]
+  nomad_host_volume = "prod-volume-data1-1"
 
   # minio
-  minio_job_name = "prod-minio"
-  minio_group_count = 4
-  minio_service_name = "storage"
-  minio_host = "http://10.32.8.1{4...7}"
-  minio_port = 9000
-  minio_container_image = "minio/minio:RELEASE.2021-07-27T02-40-15Z"
-  minio_vault_secret = {
-    use_vault_provider = false,
-    vault_kv_policy_name = "kv-secret",
-    vault_kv_path = "secret/data/minio",
-    vault_kv_field_access_key = "access_key",
-    vault_kv_field_secret_key = "secret_key"
+  minio_job_name        = "prod-minio"
+  minio_group_count     = 4
+  minio_service_name    = "storage"
+  minio_host            = "http://10.32.8.1{4...7}"
+  minio_port            = 9000
+  minio_container_image = "minio/minio:RELEASE.2021-07-27T02-40-15Z"
+  minio_vault_secret = {
+    use_vault_provider        = false,
+    vault_kv_policy_name      = "kv-secret",
+    vault_kv_path             = "secret/data/minio",
+    vault_kv_field_access_key = "access_key",
+    vault_kv_field_secret_key = "secret_key"
   }
-  minio_data_dir = "/data/"
-  minio_use_host_volume = true
-  minio_use_canary = true
-  minio_envs = [ "MINIO_BROWSER=\"off\"" ]
+  minio_data_dir        = "/data/"
+  minio_use_host_volume = true
+  minio_use_canary      = true
+  minio_envs            = ["MINIO_BROWSER=\"off\""]
 
   # minio client
-  mc_job_name = "prod-mc"
-  mc_container_image = "minio/mc:RELEASE.2021-07-27T02-40-15Z"
-  mc_extra_commands = [
+  mc_job_name        = "prod-mc"
+  mc_container_image = "minio/mc:RELEASE.2021-07-27T02-40-15Z"
+  mc_extra_commands = [
     "mc policy set public LOCALMINIO/logs.fd.io",
     "mc policy set public LOCALMINIO/docs.fd.io",
     "mc ilm add --expiry-days '180' LOCALMINIO/logs.fd.io",
     "mc admin user add LOCALMINIO storage Storage1234",
     "mc admin policy set LOCALMINIO writeonly user=storage"
   ]
-  minio_buckets = [ "logs.fd.io", "docs.fd.io" ]
+  minio_buckets = ["logs.fd.io", "docs.fd.io"]
 }
 
 module "nginx" {
-  source = "./nginx"
-  providers = {
+  source    = "./nginx"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
-  nomad_host_volume = "prod-volume-data1-1"
+  nomad_datacenters = ["yul1"]
+  nomad_host_volume = "prod-volume-data1-1"
 
   # nginx
-  nginx_job_name = "prod-nginx"
-  nginx_use_host_volume = true
+  nginx_job_name        = "prod-nginx"
+  nginx_use_host_volume = true
 }
 
 module "prometheus" {
-  source = "./prometheus"
-  providers = {
+  source    = "./prometheus"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
-  nomad_host_volume = "prod-volume-data1-1"
+  nomad_datacenters = ["yul1"]
+  nomad_host_volume = "prod-volume-data1-1"
 
   # prometheus
-  prometheus_job_name = "prod-prometheus"
-  prometheus_use_canary = true
-  prometheus_group_count = 4
-  prometheus_vault_secret = {
-    use_vault_provider = false,
-    vault_kv_policy_name = "kv-secret",
-    vault_kv_path = "secret/data/prometheus",
-    vault_kv_field_access_key = "access_key",
-    vault_kv_field_secret_key = "secret_key"
+  prometheus_job_name     = "prod-prometheus"
+  prometheus_use_canary   = true
+  prometheus_group_count  = 4
+  prometheus_vault_secret = {
+    use_vault_provider        = false,
+    vault_kv_policy_name      = "kv-secret",
+    vault_kv_path             = "secret/data/prometheus",
+    vault_kv_field_access_key = "access_key",
+    vault_kv_field_secret_key = "secret_key"
   }
-  prometheus_data_dir = "/data/"
-  prometheus_use_host_volume = true
-  prometheus_version = "2.28.1"
-  prometheus_cpu = 2000
-  prometheus_mem = 8192
-  prometheus_port = 9090
+  prometheus_data_dir        = "/data/"
+  prometheus_use_host_volume = true
+  prometheus_version         = "2.28.1"
+  prometheus_cpu             = 2000
+  prometheus_mem             = 8192
+  prometheus_port            = 9090
 }
 
 module "vpp_device" {
-  source = "./vpp_device"
-  providers = {
+  source    = "./vpp_device"
+  providers = {
     nomad = nomad.yul1
   }
 
   # nomad
-  nomad_datacenters = [ "yul1" ]
+  nomad_datacenters = ["yul1"]
 
   # csit_shim
-  csit_shim_job_name = "prod-device-csit-shim"
-  csit_shim_group_count = "1"
-  csit_shim_cpu = "1500"
-  csit_shim_mem = "4096"
-  csit_shim_image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
-  csit_shim_image_x86_64 = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
+  csit_shim_job_name      = "prod-device-csit-shim"
+  csit_shim_group_count   = "1"
+  csit_shim_cpu           = "1500"
+  csit_shim_mem           = "4096"
+  csit_shim_image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
+  csit_shim_image_x86_64  = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
 }
diff --git a/fdio.infra.terraform/1n_nmd/minio/main.tf b/fdio.infra.terraform/1n_nmd/minio/main.tf
index 62d143f4b1..6954cc2f05 100644
--- a/fdio.infra.terraform/1n_nmd/minio/main.tf
+++ b/fdio.infra.terraform/1n_nmd/minio/main.tf
@@ -1,15 +1,15 @@
 locals {
-  datacenters = join(",", var.nomad_datacenters)
-  minio_env_vars = join("\n",
+  datacenters    = join(",", var.nomad_datacenters)
+  minio_env_vars = join("\n",
     concat([
     ], var.minio_envs)
   )
-  mc_env_vars = join("\n",
+  mc_env_vars = join("\n",
     concat([
     ], var.mc_envs)
   )
   mc_formatted_bucket_list = formatlist("LOCALMINIO/%s", var.minio_buckets)
-  mc_add_config_command = concat(
+  mc_add_config_command = concat(
     [
       "mc",
       "config",
@@ -25,8 +25,8 @@ locals {
 }
 
 data "template_file" "nomad_job_minio" {
-  template = file("${path.module}/conf/nomad/minio.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/minio.hcl")
+  vars = {
     job_name    = var.minio_job_name
     datacenters = local.datacenters
     use_canary  = var.minio_use_canary
@@ -51,8 +51,8 @@ data "template_file" "nomad_job_minio" {
 }
 
 data "template_file" "nomad_job_mc" {
-  template = file("${path.module}/conf/nomad/mc.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/mc.hcl")
+  vars = {
     job_name     = var.mc_job_name
     service_name = var.mc_service_name
     datacenters  = local.datacenters
@@ -68,8 +68,8 @@ data "template_file" "nomad_job_mc" {
 }
 
 resource "nomad_job" "nomad_job_minio" {
-  jobspec = data.template_file.nomad_job_minio.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_minio.rendered
+  detach  = false
 }
 
 #resource "nomad_job" "nomad_job_mc" {
diff --git a/fdio.infra.terraform/1n_nmd/minio/variables.tf b/fdio.infra.terraform/1n_nmd/minio/variables.tf
index dbac3465ee..ab9d07f0d7 100644
--- a/fdio.infra.terraform/1n_nmd/minio/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/minio/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 variable "nomad_host_volume" {
@@ -98,7 +98,7 @@ variable "minio_use_canary" {
 
 variable "minio_vault_secret" {
   description = "Set of properties to be able to fetch secret from vault"
-  type = object({
+  type        = object({
     use_vault_provider = bool,
     vault_kv_policy_name = string,
    vault_kv_path = string,
@@ -109,13 +109,13 @@ variable "minio_vault_secret" {
 
 variable "minio_resource_proxy" {
   description = "Minio proxy resources"
-  type = object({
-    cpu = number,
-    memory = number
+  type = object({
+    cpu    = number,
+    memory = number
   })
-  default = {
-    cpu = 200,
-    memory = 128
+  default = {
+    cpu    = 200,
+    memory = 128
   }
   validation {
     condition = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128
@@ -156,11 +156,11 @@ variable "minio_buckets" {
 
 variable "minio_upstreams" {
   description = "List of upstream services (list of object with service_name, port)"
-  type = list(object({
+  type = list(object({
     service_name = string,
     port         = number,
   }))
-  default = []
+  default = []
 }
 
 variable "mc_extra_commands" {
diff --git a/fdio.infra.terraform/1n_nmd/nginx/main.tf b/fdio.infra.terraform/1n_nmd/nginx/main.tf
index 025fcb6b8b..fbd48bf645 100644
--- a/fdio.infra.terraform/1n_nmd/nginx/main.tf
+++ b/fdio.infra.terraform/1n_nmd/nginx/main.tf
@@ -3,8 +3,8 @@ locals {
 }
 
 data "template_file" "nomad_job_nginx" {
-  template = file("${path.module}/conf/nomad/nginx.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/nginx.hcl")
+  vars = {
     job_name        = var.nginx_job_name
     datacenters     = local.datacenters
     use_host_volume = var.nginx_use_host_volume
@@ -13,6 +13,6 @@ data "template_file" "nomad_job_nginx" {
 }
 
 resource "nomad_job" "nomad_job_nginx" {
-  jobspec = data.template_file.nomad_job_nginx.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_nginx.rendered
+  detach  = false
 }
\ No newline at end of file
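`minio/variables.tf` above guards `minio_resource_proxy` with a custom `validation` block — input checking that Terraform evaluates at plan time. A standalone sketch of the same pattern (the `error_message` text is an assumption; it is not visible in the hunk):

```hcl
# Standalone sketch of the plan-time input validation used above; shape and
# bounds mirror minio_resource_proxy, the error message is illustrative.
variable "minio_resource_proxy" {
  description = "Minio proxy resources"
  type = object({
    cpu    = number,
    memory = number
  })
  default = {
    cpu    = 200,
    memory = 128
  }
  validation {
    condition     = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128
    error_message = "Proxy needs at least cpu = 200 and memory = 128."
  }
}
```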
diff --git a/fdio.infra.terraform/1n_nmd/nginx/variables.tf b/fdio.infra.terraform/1n_nmd/nginx/variables.tf
index 0262014049..589ccee94a 100644
--- a/fdio.infra.terraform/1n_nmd/nginx/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/nginx/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 variable "nomad_host_volume" {
diff --git a/fdio.infra.terraform/1n_nmd/prometheus/main.tf b/fdio.infra.terraform/1n_nmd/prometheus/main.tf
index 9506ba3941..0c504c9ca4 100644
--- a/fdio.infra.terraform/1n_nmd/prometheus/main.tf
+++ b/fdio.infra.terraform/1n_nmd/prometheus/main.tf
@@ -1,5 +1,5 @@
 locals {
-  datacenters = join(",", var.nomad_datacenters)
+  datacenters = join(",", var.nomad_datacenters)
 
   prometheus_url = join("",
     [
@@ -12,8 +12,8 @@ locals {
 }
 
 data "template_file" "nomad_job_prometheus" {
-  template = file("${path.module}/conf/nomad/prometheus.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/prometheus.hcl")
+  vars = {
     datacenters = local.datacenters
     url         = local.prometheus_url
     job_name    = var.prometheus_job_name
@@ -32,6 +32,6 @@ data "template_file" "nomad_job_prometheus" {
 }
 
 resource "nomad_job" "nomad_job_prometheus" {
-  jobspec = data.template_file.nomad_job_prometheus.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_prometheus.rendered
+  detach  = false
 }
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/prometheus/variables.tf b/fdio.infra.terraform/1n_nmd/prometheus/variables.tf
index 55ffa33856..befd9412cb 100644
--- a/fdio.infra.terraform/1n_nmd/prometheus/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/prometheus/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 variable "nomad_host_volume" {
@@ -44,7 +44,7 @@ variable "prometheus_use_canary" {
 
 variable "prometheus_vault_secret" {
   description = "Set of properties to be able to fetch secret from vault"
-  type = object({
+  type        = object({
     use_vault_provider = bool,
     vault_kv_policy_name = string,
     vault_kv_path = string,
diff --git a/fdio.infra.terraform/1n_nmd/providers.tf b/fdio.infra.terraform/1n_nmd/providers.tf
index c7e0ad8bf3..c3b9ec275d 100644
--- a/fdio.infra.terraform/1n_nmd/providers.tf
+++ b/fdio.infra.terraform/1n_nmd/providers.tf
@@ -16,6 +16,6 @@ terraform {
 }
 
 provider "nomad" {
-  address = var.nomad_provider_address
-  alias = "yul1"
+  address = var.nomad_provider_address
+  alias   = "yul1"
 }
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/tools/artifacts.py b/fdio.infra.terraform/1n_nmd/tools/artifacts.py
deleted file mode 100755
index 8c0a24af72..0000000000
--- a/fdio.infra.terraform/1n_nmd/tools/artifacts.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/python3
-
-# Copyright (c) 2021 Cisco and/or its affiliates.
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Storage utilities library."""
-
-import argparse
-import gzip
-import os
-from mimetypes import MimeTypes
-
-from boto3 import resource
-from botocore.client import Config
-
-
-ENDPOINT_URL = u"http://storage.service.consul:9000"
-AWS_ACCESS_KEY_ID = u"storage"
-AWS_SECRET_ACCESS_KEY = u"Storage1234"
-REGION_NAME = u"yul1"
-COMPRESS_MIME = (
-    u"text/html",
-    u"text/xml",
-    u"application/octet-stream"
-)
-
-
-def compress(src_fpath):
-    """Compress a single file.
-
-    :param src_fpath: Input file path.
-    :type src_fpath: str
-    """
-    with open(src_fpath, u"rb") as orig_file:
-        with gzip.open(f"{src_fpath}.gz", u"wb") as zipped_file:
-            zipped_file.writelines(orig_file)
-
-
-def upload(storage, bucket, src_fpath, dst_fpath):
-    """Upload single file to destination bucket.
-
-    :param storage: S3 storage resource.
-    :param bucket: S3 bucket name.
-    :param src_fpath: Input file path.
-    :param dst_fpath: Destination file path on remote storage.
-    :type storage: Object
-    :type bucket: str
-    :type src_fpath: str
-    :type dst_fpath: str
-    """
-    mime = MimeTypes().guess_type(src_fpath)[0]
-    if not mime:
-        mime = "application/octet-stream"
-
-    if mime in COMPRESS_MIME and bucket in "logs":
-        compress(src_fpath)
-        src_fpath = f"{src_fpath}.gz"
-        dst_fpath = f"{dst_fpath}.gz"
-
-    storage.Bucket(f"{bucket}.fd.io").upload_file(
-        src_fpath,
-        dst_fpath,
-        ExtraArgs={
-            u"ContentType": mime
-        }
-    )
-    print(f"https://{bucket}.nginx.service.consul/{dst_fpath}")
-
-
-def upload_recursive(storage, bucket, src_fpath):
-    """Recursively uploads input folder to destination.
-
-    Example:
-      - bucket: logs
-      - src_fpath: /home/user
-      - dst_fpath: logs.fd.io/home/user
-
-    :param storage: S3 storage resource.
-    :param bucket: S3 bucket name.
-    :param src_fpath: Input folder path.
-    :type storage: Object
-    :type bucket: str
-    :type src_fpath: str
-    """
-    for path, _, files in os.walk(src_fpath):
-        for file in files:
-            _path = path.replace(src_fpath, u"")
-            _dir = src_fpath[1:] if src_fpath[0] == "/" else src_fpath
-            _dst_fpath = os.path.normpath(f"{_dir}/{_path}/{file}")
-            _src_fpath = os.path.join(path, file)
-            upload(storage, bucket, _src_fpath, _dst_fpath)
-
-
-def main():
-    """Main function for storage manipulation."""
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        u"-d", u"--dir", required=True, type=str,
-        help=u"Directory to upload to storage."
-    )
-    parser.add_argument(
-        u"-b", u"--bucket", required=True, type=str,
-        help=u"Target bucket on storage."
-    )
-    args = parser.parse_args()
-
-    # Create main storage resource.
-    storage = resource(
-        u"s3",
-        endpoint_url=ENDPOINT_URL,
-        aws_access_key_id=AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
-        config=Config(
-            signature_version=u"s3v4"
-        ),
-        region_name=REGION_NAME
-    )
-
-    upload_recursive(
-        storage=storage,
-        bucket=args.bucket,
-        src_fpath=args.dir
-    )
-
-
-if __name__ == u"__main__":
-    main()
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf b/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
index 80c02ec316..89b28ce385 100644
--- a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
+++ b/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
@@ -3,8 +3,8 @@ locals {
 }
 
 data "template_file" "nomad_job_csit_shim" {
-  template = file("${path.module}/conf/nomad/csit_shim.hcl")
-  vars = {
+  template = file("${path.module}/conf/nomad/csit_shim.hcl")
+  vars = {
     datacenters = local.datacenters
     job_name    = var.csit_shim_job_name
     group_count = var.csit_shim_group_count
@@ -16,6 +16,6 @@ data "template_file" "nomad_job_csit_shim" {
 }
 
 resource "nomad_job" "nomad_job_csit_shim" {
-  jobspec = data.template_file.nomad_job_csit_shim.rendered
-  detach = false
+  jobspec = data.template_file.nomad_job_csit_shim.rendered
+  detach  = false
 }
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf b/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf
index 893968bfc0..401be66f27 100644
--- a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf
+++ b/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf
@@ -2,7 +2,7 @@
 variable "nomad_datacenters" {
   description = "Nomad data centers"
   type        = list(string)
-  default     = [ "dc1" ]
+  default     = ["dc1"]
 }
 
 # CSIT SHIM
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
index 9bac6a7b35..1ea7e50152 100644
--- a/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
@@ -1,220 +1,220 @@
 data "vault_aws_access_credentials" "creds" {
-  backend = "${var.vault-name}-path"
-  role = "${var.vault-name}-role"
+  backend = "${var.vault-name}-path"
+  role    = "${var.vault-name}-role"
 }
 
 resource "aws_vpc" "CSITVPC" {
   cidr_block = var.vpc_cidr_mgmt
 
-  tags = {
-    "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc"
-    "Environment" = var.environment_name
+  tags = {
+    "Name"        = "${var.resources_name_prefix}_${var.testbed_name}-vpc"
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_security_group" "CSITSG" {
-  name = "${var.resources_name_prefix}_${var.testbed_name}-sg"
-  description = "Allow inbound traffic"
-  vpc_id = aws_vpc.CSITVPC.id
-  depends_on = [aws_vpc.CSITVPC]
+  name        = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+  description = "Allow inbound traffic"
+  vpc_id      = aws_vpc.CSITVPC.id
+  depends_on  = [aws_vpc.CSITVPC]
 
   ingress {
-    from_port = 22
-    to_port = 22
-    protocol = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
   }
 
   ingress {
-    from_port = 0
-    to_port = 0
-    protocol = -1
-    self = true
+    from_port = 0
+    to_port   = 0
+    protocol  = -1
+    self      = true
   }
 
   egress {
-    from_port = 0
-    to_port = 0
-    protocol = "-1"
-    cidr_blocks = ["0.0.0.0/0"]
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
   }
 
   tags = {
-    "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg"
-    "Environment" = var.environment_name
+    "Name"        = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_vpc_ipv4_cidr_block_association" "b" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_b
-  depends_on = [aws_vpc.CSITVPC]
+  vpc_id     = aws_vpc.CSITVPC.id
+  cidr_block = var.vpc_cidr_b
+  depends_on = [aws_vpc.CSITVPC]
 }
 
 resource "aws_vpc_ipv4_cidr_block_association" "c" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_c
-  depends_on = [aws_vpc.CSITVPC]
+  vpc_id     = aws_vpc.CSITVPC.id
+  cidr_block = var.vpc_cidr_c
+  depends_on = [aws_vpc.CSITVPC]
 }
 
 resource "aws_vpc_ipv4_cidr_block_association" "d" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_d
-  depends_on = [aws_vpc.CSITVPC]
+  vpc_id     = aws_vpc.CSITVPC.id
+  cidr_block = var.vpc_cidr_d
+  depends_on = [aws_vpc.CSITVPC]
 }
 
 # Subnets
 resource "aws_subnet" "mgmt" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_mgmt
-  availability_zone = var.avail_zone
-  depends_on = [aws_vpc.CSITVPC]
+  vpc_id            = aws_vpc.CSITVPC.id
+  cidr_block        = var.vpc_cidr_mgmt
+  availability_zone = var.avail_zone
+  depends_on        = [aws_vpc.CSITVPC]
 
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_subnet" "b" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_b
-  availability_zone = var.avail_zone
-  depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b]
+  vpc_id            = aws_vpc.CSITVPC.id
+  cidr_block        = var.vpc_cidr_b
+  availability_zone = var.avail_zone
+  depends_on        = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b]
 
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_subnet" "c" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_c
-  availability_zone = var.avail_zone
-  depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c]
+  vpc_id            = aws_vpc.CSITVPC.id
+  cidr_block        = var.vpc_cidr_c
+  availability_zone = var.avail_zone
+  depends_on        = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c]
 
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
  }
 }
 
 resource "aws_subnet" "d" {
-  vpc_id = aws_vpc.CSITVPC.id
-  cidr_block = var.vpc_cidr_d
-  availability_zone = var.avail_zone
-  depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d]
+  vpc_id            = aws_vpc.CSITVPC.id
+  cidr_block        = var.vpc_cidr_d
+  availability_zone = var.avail_zone
+  depends_on        = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d]
 
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_internet_gateway" "CSITGW" {
-  vpc_id = aws_vpc.CSITVPC.id
-  depends_on = [aws_vpc.CSITVPC]
+  vpc_id     = aws_vpc.CSITVPC.id
+  depends_on = [aws_vpc.CSITVPC]
 
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 # SSH keypair
 # Temporary key for provisioning only
 resource "tls_private_key" "CSITTLS" {
-  algorithm = "RSA"
-  rsa_bits = 4096
+  algorithm = "RSA"
+  rsa_bits  = 4096
 }
 
 resource "aws_key_pair" "CSITKP" {
-  key_name = "${var.resources_name_prefix}_${var.testbed_name}-key"
-  public_key = tls_private_key.CSITTLS.public_key_openssh
+  key_name   = "${var.resources_name_prefix}_${var.testbed_name}-key"
+  public_key = tls_private_key.CSITTLS.public_key_openssh
 }
 
 resource "aws_placement_group" "CSITPG" {
-  name = "${var.resources_name_prefix}_${var.testbed_name}-pg"
-  strategy = "cluster"
+  name     = "${var.resources_name_prefix}_${var.testbed_name}-pg"
+  strategy = "cluster"
 }
 
 # NICs
 resource "aws_network_interface" "dut1_if1" {
-  subnet_id = aws_subnet.b.id
-  source_dest_check = false
-  private_ip = var.dut1_if1_ip
-  private_ips = [var.dut1_if1_ip]
-  security_groups = [aws_security_group.CSITSG.id]
-  depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1]
+  subnet_id         = aws_subnet.b.id
+  source_dest_check = false
+  private_ip        = var.dut1_if1_ip
+  private_ips       = [var.dut1_if1_ip]
+  security_groups   = [aws_security_group.CSITSG.id]
+  depends_on        = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1]
 
   attachment {
-    instance = aws_instance.dut1.id
-    device_index = 1
+    instance     = aws_instance.dut1.id
+    device_index = 1
   }
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_network_interface" "dut1_if2" {
-  subnet_id = aws_subnet.d.id
-  source_dest_check = false
-  private_ip = var.dut1_if2_ip
-  private_ips = [var.dut1_if2_ip]
-  security_groups = [aws_security_group.CSITSG.id]
-  depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut1]
+  subnet_id         = aws_subnet.d.id
+  source_dest_check = false
+  private_ip        = var.dut1_if2_ip
+  private_ips       = [var.dut1_if2_ip]
+  security_groups   = [aws_security_group.CSITSG.id]
+  depends_on        = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut1]
 
   attachment {
-    instance = aws_instance.dut1.id
-    device_index = 2
+    instance     = aws_instance.dut1.id
+    device_index = 2
   }
   tags = {
-    "Environment" = var.environment_name
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_network_interface" "tg_if1" {
-  subnet_id = aws_subnet.b.id
-  source_dest_check = false
-  private_ip = var.tg_if1_ip
-  private_ips = [var.tg_if1_ip]
-  security_groups = [aws_security_group.CSITSG.id]
-  depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg]
+  subnet_id         = aws_subnet.b.id
+  source_dest_check = false
+  private_ip        = var.tg_if1_ip
+  private_ips       = [var.tg_if1_ip]
+  security_groups   = [aws_security_group.CSITSG.id]
+  depends_on        = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg]
 
   attachment {
-    instance = aws_instance.tg.id
-    device_index = 1
+    instance     = aws_instance.tg.id
+    device_index = 1
   }
-  tags = {
-    "Environment" = var.environment_name
+  tags = {
+    "Environment" = var.environment_name
   }
 }
 
 resource "aws_network_interface" "tg_if2" {
-  subnet_id = aws_subnet.d.id
-  source_dest_check = false
-  private_ip = var.tg_if2_ip
-  private_ips = [var.tg_if2_ip]
-  security_groups = [aws_security_group.CSITSG.id]
-  depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg]
+  subnet_id         = aws_subnet.d.id
+  source_dest_check = false
+  private_ip        = var.tg_if2_ip
+  private_ips       = [var.tg_if2_ip]
+  security_groups   = [aws_security_group.CSITSG.id]
+  depends_on        = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg]
 
   attachment {
-    instance = aws_instance.tg.id
-    device_index = 2
+    instance     = aws_instance.tg.id
+    device_index = 2
   }
-  tags = {
-    "Environment" = var.environment_name
+  tags = {
+    "Environment" = var.environment_name
   }
 }
 
 data "aws_network_interface" "dut1_if1" {
-  id = aws_network_interface.dut1_if1.id
+  id = aws_network_interface.dut1_if1.id
 }
 
 data "aws_network_interface" "dut1_if2" {
-  id = aws_network_interface.dut1_if2.id
+  id = aws_network_interface.dut1_if2.id
 }
 
 data "aws_network_interface" "tg_if1" {
-  id = aws_network_interface.tg_if1.id
+  id = aws_network_interface.tg_if1.id
 }
 
 data "aws_network_interface" "tg_if2" {
-  id = aws_network_interface.tg_if2.id
+  id = aws_network_interface.tg_if2.id
 }
 
 # Instances
@@ -238,8 +238,8 @@ resource "aws_instance" "tg" {
   }
 
   tags = {
-    "Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg"
-    "Environment" = var.environment_name
+    "Name"        = "${var.resources_name_prefix}_${var.testbed_name}-tg"
+    "Environment" = var.environment_name
   }
 }
 
@@ -263,8 +263,8 @@ resource "aws_instance" "dut1" {
   }
 
   tags = {
-    "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1"
-    "Environment" = var.environment_name
+    "Name"        = "${var.resources_name_prefix}_${var.testbed_name}-dut1"
+    "Environment" = var.environment_name
   }
 }
 
@@ -292,7 +292,7 @@ resource "aws_route" "dummy-trex-port-1" {
 
 # Deployment/Ansible
 resource "null_resource" "deploy_tg" {
-  depends_on = [
+  depends_on = [
     aws_instance.tg,
     aws_network_interface.tg_if1,
     aws_network_interface.tg_if2,
@@ -302,23 +302,23 @@ resource "null_resource" "deploy_tg" {
   ]
 
   connection {
-    user = "ubuntu"
-    host = aws_instance.tg.public_ip
-    private_key = tls_private_key.CSITTLS.private_key_pem
+    user        = "ubuntu"
+    host        = aws_instance.tg.public_ip
+    private_key = tls_private_key.CSITTLS.private_key_pem
   }
 
   provisioner "remote-exec" {
-    inline = var.first_run_commands
+    inline = var.first_run_commands
   }
 
   provisioner "ansible" {
     plays {
       playbook {
-        file_path = var.ansible_file_path
-        force_handlers = true
+        file_path      = var.ansible_file_path
+        force_handlers = true
       }
-      hosts = ["tg_aws"]
-      extra_vars = {
+      hosts      = ["tg_aws"]
+      extra_vars = {
         ansible_ssh_pass = var.ansible_provision_pwd
         ansible_python_interpreter = var.ansible_python_executable
         aws = true
@@ -327,13 +327,13 @@
   }
 
   provisioner "remote-exec" {
-    on_failure = continue
-    inline = ["sudo reboot"]
+    on_failure = continue
+    inline     = ["sudo reboot"]
  }
 }
 
 resource "null_resource" "deploy_dut1" {
-  depends_on = [
+  depends_on = [
     aws_instance.tg,
     aws_network_interface.tg_if1,
     aws_network_interface.tg_if2,
@@ -343,23 +343,23 @@ resource "null_resource" "deploy_dut1" {
   ]
 
   connection {
-    user = "ubuntu"
-    host = aws_instance.dut1.public_ip
-    private_key = tls_private_key.CSITTLS.private_key_pem
+    user        = "ubuntu"
+    host        = aws_instance.dut1.public_ip
+    private_key = tls_private_key.CSITTLS.private_key_pem
   }
 
   provisioner "remote-exec" {
-    inline = var.first_run_commands
+    inline = var.first_run_commands
   }
 
   provisioner "ansible" {
     plays {
      playbook {
-        file_path = var.ansible_file_path
-        force_handlers = true
+        file_path      = var.ansible_file_path
+        force_handlers = true
       }
-      hosts = ["sut_aws"]
-      extra_vars = {
+      hosts      = ["sut_aws"]
+      extra_vars = {
        ansible_ssh_pass = var.ansible_provision_pwd
         ansible_python_interpreter = var.ansible_python_executable
         aws = true
@@ -368,21 +368,21 @@ resource "null_resource" "deploy_dut1" {
   }
 
   provisioner "remote-exec" {
-    on_failure = continue
-    inline = ["sudo reboot"]
+    on_failure = continue
+    inline     = ["sudo reboot"]
   }
 }
 
 resource "null_resource" "deploy_topology" {
-  depends_on = [ aws_instance.tg, aws_instance.dut1 ]
+  depends_on = [aws_instance.tg, aws_instance.dut1]
 
   provisioner "ansible" {
     plays {
       playbook {
         file_path = var.ansible_topology_path
       }
-      hosts = ["local"]
-      extra_vars = {
+      hosts      = ["local"]
+      extra_vars = {
         ansible_python_interpreter = var.ansible_python_executable
         testbed_name = var.testbed_name
         cloud_topology = var.topology_name
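Stripped of the whitespace churn, each `null_resource` above runs the same three-step sequence over SSH: bootstrap commands, an Ansible play, then a best-effort reboot. A skeleton of that sequence (assuming the third-party `ansible` provisioner plugin this repository relies on; the plain `remote-exec` steps are built into Terraform):

```hcl
# Skeleton of the deploy_tg / deploy_dut1 pattern above. The "ansible"
# provisioner is not part of core Terraform; this assumes the third-party
# provisioner plugin used by this repo is installed locally.
resource "null_resource" "deploy_example" {
  depends_on = [aws_instance.tg] # provision only after the instance exists

  connection {
    user        = "ubuntu"
    host        = aws_instance.tg.public_ip
    private_key = tls_private_key.CSITTLS.private_key_pem
  }

  provisioner "remote-exec" {
    inline = var.first_run_commands # e.g. re-enable password auth, add users
  }

  provisioner "ansible" {
    plays {
      playbook {
        file_path = var.ansible_file_path
      }
      hosts = ["tg_aws"]
    }
  }

  provisioner "remote-exec" {
    on_failure = continue # the reboot drops the SSH session by design
    inline     = ["sudo reboot"]
  }
}
```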
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/providers.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/providers.tf
index a74ebb2455..38244af0bd 100644
--- a/fdio.infra.terraform/2n_aws_c5n/deploy/providers.tf
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/providers.tf
@@ -1,7 +1,7 @@
 provider "aws" {
-  region = var.region
-  access_key = data.vault_aws_access_credentials.creds.access_key
-  secret_key = data.vault_aws_access_credentials.creds.secret_key
+  region     = var.region
+  access_key = data.vault_aws_access_credentials.creds.access_key
+  secret_key = data.vault_aws_access_credentials.creds.secret_key
 }
 
 provider "vault" {
instance type" + type = string } variable "avail_zone" { - description = "AWS availability zone" - type = string + description = "AWS availability zone" + type = string } variable "topology_name" { - description = "Prefix used when creating a topology file" - type = string - default = "2n_aws_c5n" + description = "Prefix used when creating a topology file" + type = string + default = "2n_aws_c5n" } variable "environment_name" { - description = "Environment name - used for Environment tag" - type = string - default = "CSIT-AWS" + description = "Environment name - used for Environment tag" + type = string + default = "CSIT-AWS" } variable "resources_name_prefix" { - description = "Resource prefix - used for Name tag" - type = string - default = "CSIT_2n_aws_c5n" + description = "Resource prefix - used for Name tag" + type = string + default = "CSIT_2n_aws_c5n" } variable "first_run_commands" { - description = "Commands to run after deployment via remote-exec" - type = list(string) - default = [""] + description = "Commands to run after deployment via remote-exec" + type = list(string) + default = [""] } variable "ansible_file_path" { - description = "Path to Ansible playbook" - type = string - default = "../../fdio.infra.ansible/site.yaml" + description = "Path to Ansible playbook" + type = string + default = "../../fdio.infra.ansible/site.yaml" } variable "ansible_python_executable" { - description = "Path to Python interpreter" - type = string - default = "/usr/bin/python3" + description = "Path to Python interpreter" + type = string + default = "/usr/bin/python3" } variable "ansible_topology_path" { - description = "Path to Ansible playbook which creates a topology file" - type = string - default = "../../fdio.infra.ansible/cloud_topology.yaml" + description = "Path to Ansible playbook which creates a topology file" + type = string + default = "../../fdio.infra.ansible/cloud_topology.yaml" } variable "ansible_provision_pwd" { - description = "Password used for ansible provisioning (ansible_ssh_pass)" - type = string - default = "Csit1234" + description = "Password used for ansible provisioning (ansible_ssh_pass)" + type = string + default = "Csit1234" } # Base VPC CIDRs variable "vpc_cidr_mgmt" { - description = "Management CIDR block" - type = string - default = "192.168.0.0/24" + description = "Management CIDR block" + type = string + default = "192.168.0.0/24" } variable "vpc_cidr_b" { - description = "CIDR block B" - type = string - default = "192.168.10.0/24" + description = "CIDR block B" + type = string + default = "192.168.10.0/24" } variable "vpc_cidr_c" { - description = "CIDR block C" - type = string - default = "200.0.0.0/24" + description = "CIDR block C" + type = string + default = "200.0.0.0/24" } variable "vpc_cidr_d" { - description = "CIDR block D" - type = string - default = "192.168.20.0/24" + description = "CIDR block D" + type = string + default = "192.168.20.0/24" } # Trex Dummy CIDRs variable "trex_dummy_cidr_port_0" { - description = "TREX dummy CIDR" - type = string - default = "10.0.0.0/24" + description = "TREX dummy CIDR" + type = string + default = "10.0.0.0/24" } variable "trex_dummy_cidr_port_1" { - description = "TREX dummy CIDR" - type = string - default = "20.0.0.0/24" + description = "TREX dummy CIDR" + type = string + default = "20.0.0.0/24" } # IPs variable "tg_if1_ip" { - description = "TG IP on interface 1" - type = string - default = "192.168.10.254" + description = "TG IP on interface 1" + type = string + default = "192.168.10.254" } variable 
"tg_if2_ip" { - description = "TG IP on interface 2" - type = string - default = "192.168.20.254" + description = "TG IP on interface 2" + type = string + default = "192.168.20.254" } variable "dut1_if1_ip" { - description = "DUT IP on interface 1" - type = string - default = "192.168.10.11" + description = "DUT IP on interface 1" + type = string + default = "192.168.10.11" } variable "dut1_if2_ip" { - description = "DUT IP on interface 1" - type = string - default = "192.168.20.11" + description = "DUT IP on interface 1" + type = string + default = "192.168.20.11" } variable "tg_mgmt_ip" { - description = "TG management interface IP" - type = string - default = "192.168.0.10" + description = "TG management interface IP" + type = string + default = "192.168.0.10" } variable "dut1_mgmt_ip" { - description = "DUT management interface IP" - type = string - default = "192.168.0.11" + description = "DUT management interface IP" + type = string + default = "192.168.0.11" } diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf index 05fa5502b5..03ae56c8ff 100644 --- a/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf +++ b/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf @@ -1,19 +1,19 @@ terraform { required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 3.32.0" + aws = { + source = "hashicorp/aws" + version = "~> 3.32.0" } - null = { - source = "hashicorp/null" - version = "~> 3.0.0" + null = { + source = "hashicorp/null" + version = "~> 3.0.0" } - tls = { - source = "hashicorp/tls" - version = "~> 3.0.0" + tls = { + source = "hashicorp/tls" + version = "~> 3.0.0" } - vault = { - version = ">=2.22.1" + vault = { + version = ">=2.22.1" } } required_version = ">= 1.0.3" diff --git a/fdio.infra.terraform/2n_aws_c5n/main.tf b/fdio.infra.terraform/2n_aws_c5n/main.tf index 1aebabd3de..a39eff64a8 100644 --- a/fdio.infra.terraform/2n_aws_c5n/main.tf +++ b/fdio.infra.terraform/2n_aws_c5n/main.tf @@ -1,35 +1,35 @@ module "deploy" { - source = "./deploy" + source = "./deploy" # Parameters starting with var. 
diff --git a/fdio.infra.terraform/2n_aws_c5n/main.tf b/fdio.infra.terraform/2n_aws_c5n/main.tf
index 1aebabd3de..a39eff64a8 100644
--- a/fdio.infra.terraform/2n_aws_c5n/main.tf
+++ b/fdio.infra.terraform/2n_aws_c5n/main.tf
@@ -1,35 +1,35 @@
 module "deploy" {
-  source = "./deploy"
+  source = "./deploy"
 
   # Parameters starting with var. can be set using "TF_VAR_*" environment variables
   # or -var parameter when running "terraform apply", for default values see ./variables.tf
-  testbed_name = var.testbed_name
-  topology_name = "2n_aws_c5n"
-  environment_name = "CSIT-AWS"
-  resources_name_prefix = "CSIT_2n_aws_c5n"
+  testbed_name          = var.testbed_name
+  topology_name         = "2n_aws_c5n"
+  environment_name      = "CSIT-AWS"
+  resources_name_prefix = "CSIT_2n_aws_c5n"
 
   # AWS general
-  region = var.region
-  avail_zone = var.avail_zone
-  instance_type = var.instance_type
-  ami_image = var.ami_image
+  region        = var.region
+  avail_zone    = var.avail_zone
+  instance_type = var.instance_type
+  ami_image     = var.ami_image
 
   # AWS Network
-  vpc_cidr_mgmt = "192.168.0.0/24"
-  vpc_cidr_b = "192.168.10.0/24"
-  vpc_cidr_c = "200.0.0.0/24"
-  vpc_cidr_d = "192.168.20.0/24"
+  vpc_cidr_mgmt = "192.168.0.0/24"
+  vpc_cidr_b    = "192.168.10.0/24"
+  vpc_cidr_c    = "200.0.0.0/24"
+  vpc_cidr_d    = "192.168.20.0/24"
 
-  tg_mgmt_ip = "192.168.0.10"
-  dut1_mgmt_ip = "192.168.0.11"
+  tg_mgmt_ip   = "192.168.0.10"
+  dut1_mgmt_ip = "192.168.0.11"
 
-  tg_if1_ip = "192.168.10.254"
-  tg_if2_ip = "192.168.20.254"
-  dut1_if1_ip = "192.168.10.11"
-  dut1_if2_ip = "192.168.20.11"
+  tg_if1_ip   = "192.168.10.254"
+  tg_if2_ip   = "192.168.20.254"
+  dut1_if1_ip = "192.168.10.11"
+  dut1_if2_ip = "192.168.20.11"
 
-  trex_dummy_cidr_port_0 = "10.0.0.0/24"
-  trex_dummy_cidr_port_1 = "20.0.0.0/24"
+  trex_dummy_cidr_port_0 = "10.0.0.0/24"
+  trex_dummy_cidr_port_1 = "20.0.0.0/24"
 
   # Ansible
   ansible_python_executable = "/usr/bin/python3"
@@ -39,7 +39,7 @@ module "deploy" {
 
   # First run
   # TODO: Remove the testuser creation when added to user_add ansible role
-  first_run_commands = [
+  first_run_commands = [
     "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
     "sudo systemctl restart sshd",
     "sudo useradd --create-home -s /bin/bash provisionuser",
diff --git a/fdio.infra.terraform/2n_aws_c5n/variables.tf b/fdio.infra.terraform/2n_aws_c5n/variables.tf
index 7c6009b6f8..88ebda6d9d 100644
--- a/fdio.infra.terraform/2n_aws_c5n/variables.tf
+++ b/fdio.infra.terraform/2n_aws_c5n/variables.tf
@@ -1,35 +1,35 @@
 variable "region" {
-  description = "AWS Region"
-  type = string
-  default = "eu-central-1"
+  description = "AWS Region"
+  type        = string
+  default     = "eu-central-1"
 }
 
 variable "vault-name" {
-  default = "dynamic-aws-creds-vault-admin"
+  default = "dynamic-aws-creds-vault-admin"
 }
 
 variable "avail_zone" {
-  description = "AWS availability zone"
-  type = string
-  default = "eu-central-1a"
+  description = "AWS availability zone"
+  type        = string
+  default     = "eu-central-1a"
 }
 
 variable "ami_image" {
   # eu-central-1/focal-20.04-amd64-hvm-ssd-20210119.1
   # kernel 5.4.0-1035-aws (~5.4.0-65)
-  description = "AWS AMI image ID"
-  type = string
-  default = "ami-0a875db8a031a9efb"
+  description = "AWS AMI image ID"
+  type        = string
+  default     = "ami-0a875db8a031a9efb"
 }
 
 variable "instance_type" {
-  description = "AWS instance type"
-  type = string
-  default = "c5n.9xlarge"
+  description = "AWS instance type"
+  type        = string
+  default     = "c5n.9xlarge"
 }
 
 variable "testbed_name" {
-  description = "Testbed name"
-  type = string
-  default = "testbed1"
+  description = "Testbed name"
+  type        = string
+  default     = "testbed1"
 }
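The comment at the top of `module "deploy"` above spells out the override path: anything the root exposes as a variable can be set per run instead of editing main.tf. A hypothetical `terraform.tfvars` doing exactly that (the testbed name is made up; the other values repeat the defaults from `variables.tf` above):

```hcl
# Hypothetical terraform.tfvars; equivalent to exporting
# TF_VAR_testbed_name=testbed2 in the environment or passing
# -var "testbed_name=testbed2" to terraform apply.
region        = "eu-central-1"
avail_zone    = "eu-central-1a"
instance_type = "c5n.9xlarge"
ami_image     = "ami-0a875db8a031a9efb"
testbed_name  = "testbed2"
```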
"vault_aws_access_credentials" "creds" { - backend = "${var.vault-name}-path" - role = "${var.vault-name}-role" + backend = "${var.vault-name}-path" + role = "${var.vault-name}-role" } resource "aws_vpc" "CSITVPC" { - cidr_block = var.vpc_cidr_mgmt + cidr_block = var.vpc_cidr_mgmt - tags = { - "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc" - "Environment" = var.environment_name + tags = { + "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc" + "Environment" = var.environment_name } } resource "aws_security_group" "CSITSG" { - name = "${var.resources_name_prefix}_${var.testbed_name}-sg" - description = "Allow inbound traffic" - vpc_id = aws_vpc.CSITVPC.id - depends_on = [aws_vpc.CSITVPC] + name = "${var.resources_name_prefix}_${var.testbed_name}-sg" + description = "Allow inbound traffic" + vpc_id = aws_vpc.CSITVPC.id + depends_on = [aws_vpc.CSITVPC] ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] } ingress { - from_port = 0 - to_port = 0 - protocol = -1 - self = true + from_port = 0 + to_port = 0 + protocol = -1 + self = true } egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } - tags = { - "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg" - "Environment" = var.environment_name + tags = { + "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg" + "Environment" = var.environment_name } } resource "aws_vpc_ipv4_cidr_block_association" "b" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_b - depends_on = [aws_vpc.CSITVPC] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_b + depends_on = [aws_vpc.CSITVPC] } resource "aws_vpc_ipv4_cidr_block_association" "c" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_c - depends_on = [aws_vpc.CSITVPC] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_c + depends_on = [aws_vpc.CSITVPC] } resource "aws_vpc_ipv4_cidr_block_association" "d" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_d - depends_on = [aws_vpc.CSITVPC] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_d + depends_on = [aws_vpc.CSITVPC] } # Subnets resource "aws_subnet" "mgmt" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_mgmt - availability_zone = var.avail_zone - depends_on = [aws_vpc.CSITVPC] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_mgmt + availability_zone = var.avail_zone + depends_on = [aws_vpc.CSITVPC] - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_subnet" "b" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_b - availability_zone = var.avail_zone - depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_b + availability_zone = var.avail_zone + depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b] - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_subnet" "c" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_c - availability_zone = var.avail_zone - depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_c + availability_zone = var.avail_zone + depends_on = [aws_vpc.CSITVPC, 
aws_vpc_ipv4_cidr_block_association.c] - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_subnet" "d" { - vpc_id = aws_vpc.CSITVPC.id - cidr_block = var.vpc_cidr_d - availability_zone = var.avail_zone - depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d] + vpc_id = aws_vpc.CSITVPC.id + cidr_block = var.vpc_cidr_d + availability_zone = var.avail_zone + depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d] - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_internet_gateway" "CSITGW" { - vpc_id = aws_vpc.CSITVPC.id - depends_on = [aws_vpc.CSITVPC] + vpc_id = aws_vpc.CSITVPC.id + depends_on = [aws_vpc.CSITVPC] - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } # SSH keypair # Temporary key for provisioning only resource "tls_private_key" "CSITTLS" { - algorithm = "RSA" - rsa_bits = 4096 + algorithm = "RSA" + rsa_bits = 4096 } resource "aws_key_pair" "CSITKP" { - key_name = "CSIT_3n_aws_c5n_${var.testbed_name}-key" - public_key = tls_private_key.CSITTLS.public_key_openssh + key_name = "CSIT_3n_aws_c5n_${var.testbed_name}-key" + public_key = tls_private_key.CSITTLS.public_key_openssh } resource "aws_placement_group" "CSITPG" { - name = "${var.resources_name_prefix}_${var.testbed_name}-pg" - strategy = "cluster" + name = "${var.resources_name_prefix}_${var.testbed_name}-pg" + strategy = "cluster" } # NICs resource "aws_network_interface" "dut1_if1" { - subnet_id = aws_subnet.b.id - source_dest_check = false - private_ip = var.dut1_if1_ip - private_ips = [var.dut1_if1_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1] + subnet_id = aws_subnet.b.id + source_dest_check = false + private_ip = var.dut1_if1_ip + private_ips = [var.dut1_if1_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1] attachment { - instance = aws_instance.dut1.id - device_index = 1 + instance = aws_instance.dut1.id + device_index = 1 } - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_network_interface" "dut1_if2" { - subnet_id = aws_subnet.c.id - source_dest_check = false - private_ip = var.dut1_if2_ip - private_ips = [var.dut1_if2_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.c, aws_instance.dut1] + subnet_id = aws_subnet.c.id + source_dest_check = false + private_ip = var.dut1_if2_ip + private_ips = [var.dut1_if2_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.c, aws_instance.dut1] attachment { - instance = aws_instance.dut1.id - device_index = 2 + instance = aws_instance.dut1.id + device_index = 2 } - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_network_interface" "dut2_if1" { - subnet_id = aws_subnet.c.id - source_dest_check = false - private_ip = var.dut2_if1_ip - private_ips = [var.dut2_if1_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.c, aws_instance.dut2] + subnet_id = aws_subnet.c.id + source_dest_check = false + private_ip = var.dut2_if1_ip + private_ips = [var.dut2_if1_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.c, 
aws_instance.dut2] attachment { - instance = aws_instance.dut2.id - device_index = 1 + instance = aws_instance.dut2.id + device_index = 1 } - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_network_interface" "dut2_if2" { - subnet_id = aws_subnet.d.id - source_dest_check = false - private_ip = var.dut2_if2_ip - private_ips = [var.dut2_if2_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut2] + subnet_id = aws_subnet.d.id + source_dest_check = false + private_ip = var.dut2_if2_ip + private_ips = [var.dut2_if2_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut2] attachment { - instance = aws_instance.dut2.id - device_index = 2 + instance = aws_instance.dut2.id + device_index = 2 } - tags = { - "Environment" = var.environment_name + tags = { + "Environment" = var.environment_name } } resource "aws_network_interface" "tg_if1" { - subnet_id = aws_subnet.b.id - source_dest_check = false - private_ip = var.tg_if1_ip - private_ips = [var.tg_if1_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg] + subnet_id = aws_subnet.b.id + source_dest_check = false + private_ip = var.tg_if1_ip + private_ips = [var.tg_if1_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg] attachment { - instance = aws_instance.tg.id - device_index = 1 + instance = aws_instance.tg.id + device_index = 1 } tags = { - "Environment" = var.environment_name + "Environment" = var.environment_name } } resource "aws_network_interface" "tg_if2" { - subnet_id = aws_subnet.d.id - source_dest_check = false - private_ip = var.tg_if2_ip - private_ips = [var.tg_if2_ip] - security_groups = [aws_security_group.CSITSG.id] - depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg] + subnet_id = aws_subnet.d.id + source_dest_check = false + private_ip = var.tg_if2_ip + private_ips = [var.tg_if2_ip] + security_groups = [aws_security_group.CSITSG.id] + depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg] attachment { - instance = aws_instance.tg.id - device_index = 2 + instance = aws_instance.tg.id + device_index = 2 } tags = { - "Environment" = var.environment_name + "Environment" = var.environment_name } } data "aws_network_interface" "dut1_if1" { - id = aws_network_interface.dut1_if1.id + id = aws_network_interface.dut1_if1.id } data "aws_network_interface" "dut1_if2" { - id = aws_network_interface.dut1_if2.id + id = aws_network_interface.dut1_if2.id } data "aws_network_interface" "dut2_if1" { - id = aws_network_interface.dut2_if1.id + id = aws_network_interface.dut2_if1.id } data "aws_network_interface" "dut2_if2" { - id = aws_network_interface.dut2_if2.id + id = aws_network_interface.dut2_if2.id } data "aws_network_interface" "tg_if1" { - id = aws_network_interface.tg_if1.id + id = aws_network_interface.tg_if1.id } data "aws_network_interface" "tg_if2" { - id = aws_network_interface.tg_if2.id + id = aws_network_interface.tg_if2.id } # Instances @@ -276,18 +276,18 @@ resource "aws_instance" "tg" { source_dest_check = false # host_id = "1" - depends_on = [ + depends_on = [ aws_vpc.CSITVPC, aws_placement_group.CSITPG, ] root_block_device { - volume_size = 50 + volume_size = 50 } - tags = { - "Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg" - "Environment" = var.environment_name + tags = { + 
"Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg" + "Environment" = var.environment_name } } @@ -305,19 +305,19 @@ resource "aws_instance" "dut1" { source_dest_check = false # host_id = "2" - depends_on = [ + depends_on = [ aws_vpc.CSITVPC, aws_placement_group.CSITPG, aws_instance.tg ] root_block_device { - volume_size = 50 + volume_size = 50 } - tags = { - "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1" - "Environment" = var.environment_name + tags = { + "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1" + "Environment" = var.environment_name } } @@ -334,7 +334,7 @@ resource "aws_instance" "dut2" { source_dest_check = false # host_id = "3" - depends_on = [ + depends_on = [ aws_vpc.CSITVPC, aws_placement_group.CSITPG, aws_instance.tg, @@ -342,12 +342,12 @@ resource "aws_instance" "dut2" { ] root_block_device { - volume_size = 50 + volume_size = 50 } - tags = { - "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut2" - "Environment" = var.environment_name + tags = { + "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut2" + "Environment" = var.environment_name } } @@ -373,7 +373,7 @@ resource "aws_route" "dummy-trex-port-1" { # Deployment/Ansible resource "null_resource" "deploy_tg" { - depends_on = [ + depends_on = [ aws_instance.tg, aws_network_interface.tg_if1, aws_network_interface.tg_if2, @@ -386,23 +386,23 @@ resource "null_resource" "deploy_tg" { ] connection { - user = "ubuntu" - host = aws_instance.tg.public_ip - private_key = tls_private_key.CSITTLS.private_key_pem + user = "ubuntu" + host = aws_instance.tg.public_ip + private_key = tls_private_key.CSITTLS.private_key_pem } provisioner "remote-exec" { - inline = var.first_run_commands + inline = var.first_run_commands } provisioner "ansible" { plays { playbook { - file_path = var.ansible_file_path - force_handlers = true + file_path = var.ansible_file_path + force_handlers = true } - hosts = ["tg_aws"] - extra_vars = { + hosts = ["tg_aws"] + extra_vars = { ansible_ssh_pass = var.ansible_provision_pwd ansible_python_interpreter = var.ansible_python_executable aws = true @@ -411,13 +411,13 @@ resource "null_resource" "deploy_tg" { } provisioner "remote-exec" { - on_failure = continue - inline = ["sudo reboot"] + on_failure = continue + inline = ["sudo reboot"] } } resource "null_resource" "deploy_dut1" { - depends_on = [ + depends_on = [ aws_instance.tg, aws_network_interface.tg_if1, aws_network_interface.tg_if2, @@ -430,23 +430,23 @@ resource "null_resource" "deploy_dut1" { ] connection { - user = "ubuntu" - host = aws_instance.dut1.public_ip - private_key = tls_private_key.CSITTLS.private_key_pem + user = "ubuntu" + host = aws_instance.dut1.public_ip + private_key = tls_private_key.CSITTLS.private_key_pem } provisioner "remote-exec" { - inline = var.first_run_commands + inline = var.first_run_commands } provisioner "ansible" { plays { playbook { - file_path = var.ansible_file_path - force_handlers = true + file_path = var.ansible_file_path + force_handlers = true } - hosts = ["sut_aws"] - extra_vars = { + hosts = ["sut_aws"] + extra_vars = { ansible_ssh_pass = var.ansible_provision_pwd ansible_python_interpreter = var.ansible_python_executable aws = true @@ -455,13 +455,13 @@ resource "null_resource" "deploy_dut1" { } provisioner "remote-exec" { - on_failure = continue - inline = ["sudo reboot"] + on_failure = continue + inline = ["sudo reboot"] } } resource "null_resource" "deploy_dut2" { - depends_on = [ + depends_on = [ aws_instance.tg, 
aws_network_interface.tg_if1, aws_network_interface.tg_if2, @@ -474,23 +474,23 @@ resource "null_resource" "deploy_dut2" { ] connection { - user = "ubuntu" - host = aws_instance.dut2.public_ip - private_key = tls_private_key.CSITTLS.private_key_pem + user = "ubuntu" + host = aws_instance.dut2.public_ip + private_key = tls_private_key.CSITTLS.private_key_pem } provisioner "remote-exec" { - inline = var.first_run_commands + inline = var.first_run_commands } provisioner "ansible" { plays { playbook { - file_path = var.ansible_file_path - force_handlers = true + file_path = var.ansible_file_path + force_handlers = true } - hosts = ["sut_aws"] - extra_vars = { + hosts = ["sut_aws"] + extra_vars = { ansible_ssh_pass = var.ansible_provision_pwd ansible_python_interpreter = var.ansible_python_executable aws = true @@ -499,21 +499,21 @@ resource "null_resource" "deploy_dut2" { } provisioner "remote-exec" { - on_failure = continue - inline = ["sudo reboot"] + on_failure = continue + inline = ["sudo reboot"] } } resource "null_resource" "deploy_topology" { - depends_on = [ aws_instance.tg, aws_instance.dut1, aws_instance.dut2 ] + depends_on = [aws_instance.tg, aws_instance.dut1, aws_instance.dut2] provisioner "ansible" { plays { playbook { - file_path = var.ansible_topology_path + file_path = var.ansible_topology_path } - hosts = ["local"] - extra_vars = { + hosts = ["local"] + extra_vars = { ansible_python_interpreter = var.ansible_python_executable testbed_name = var.testbed_name cloud_topology = var.topology_name diff --git a/fdio.infra.terraform/3n_aws_c5n/deploy/providers.tf b/fdio.infra.terraform/3n_aws_c5n/deploy/providers.tf index a74ebb2455..38244af0bd 100644 --- a/fdio.infra.terraform/3n_aws_c5n/deploy/providers.tf +++ b/fdio.infra.terraform/3n_aws_c5n/deploy/providers.tf @@ -1,7 +1,7 @@ provider "aws" { - region = var.region - access_key = data.vault_aws_access_credentials.creds.access_key - secret_key = data.vault_aws_access_credentials.creds.secret_key + region = var.region + access_key = data.vault_aws_access_credentials.creds.access_key + secret_key = data.vault_aws_access_credentials.creds.secret_key } provider "vault" { diff --git a/fdio.infra.terraform/3n_aws_c5n/deploy/variables.tf b/fdio.infra.terraform/3n_aws_c5n/deploy/variables.tf index 735e6c6dd8..aa784622d7 100644 --- a/fdio.infra.terraform/3n_aws_c5n/deploy/variables.tf +++ b/fdio.infra.terraform/3n_aws_c5n/deploy/variables.tf @@ -1,163 +1,163 @@ variable "region" { - description = "AWS Region" - type = string + description = "AWS Region" + type = string } variable "vault-name" { - default = "dynamic-aws-creds-vault-admin" + default = "dynamic-aws-creds-vault-admin" } variable "ami_image" { - description = "AWS AMI image name" - type = string + description = "AWS AMI image name" + type = string } variable "testbed_name" { - description = "Testbed name" - type = string + description = "Testbed name" + type = string } variable "instance_initiated_shutdown_behavior" { - description = "Shutdown behavior for the instance" - type = string - default = "terminate" + description = "Shutdown behavior for the instance" + type = string + default = "terminate" } variable "instance_type" { - description = "AWS instance type" - type = string + description = "AWS instance type" + type = string } variable "avail_zone" { - description = "AWS availability zone" - type = string + description = "AWS availability zone" + type = string } variable "topology_name" { - description = "Prefix used when creating a topology file" - type = string - 
default = "3n_aws_c5n" + description = "Prefix used when creating a topology file" + type = string + default = "3n_aws_c5n" } variable "environment_name" { - description = "Environment name - used for Environment tag" - type = string - default = "CSIT-AWS" + description = "Environment name - used for Environment tag" + type = string + default = "CSIT-AWS" } variable "resources_name_prefix" { - description = "Resource prefix - used for Name tag" - type = string - default = "CSIT_3n_aws_c5n" + description = "Resource prefix - used for Name tag" + type = string + default = "CSIT_3n_aws_c5n" } variable "first_run_commands" { - description = "Commands to run after deployment via remote-exec" - type = list(string) - default = [""] + description = "Commands to run after deployment via remote-exec" + type = list(string) + default = [""] } variable "ansible_file_path" { - description = "Path to Ansible playbook" - type = string - default = "../../fdio.infra.ansible/site.yaml" + description = "Path to Ansible playbook" + type = string + default = "../../fdio.infra.ansible/site.yaml" } variable "ansible_python_executable" { - description = "Path to Python interpreter" - type = string - default = "/usr/bin/python3" + description = "Path to Python interpreter" + type = string + default = "/usr/bin/python3" } variable "ansible_topology_path" { - description = "Path to Ansible playbook which creates a topology file" - type = string - default = "../../fdio.infra.ansible/cloud_topology.yaml" + description = "Path to Ansible playbook which creates a topology file" + type = string + default = "../../fdio.infra.ansible/cloud_topology.yaml" } variable "ansible_provision_pwd" { - description = "Password used for ansible provisioning (ansible_ssh_pass)" - type = string - default = "Csit1234" + description = "Password used for ansible provisioning (ansible_ssh_pass)" + type = string + default = "Csit1234" } # Base VPC CIDRs variable "vpc_cidr_mgmt" { - description = "Management CIDR block" - type = string - default = "192.168.0.0/24" + description = "Management CIDR block" + type = string + default = "192.168.0.0/24" } variable "vpc_cidr_b" { - description = "CIDR block B" - type = string - default = "192.168.10.0/24" + description = "CIDR block B" + type = string + default = "192.168.10.0/24" } variable "vpc_cidr_c" { - description = "CIDR block C" - type = string - default = "200.0.0.0/24" + description = "CIDR block C" + type = string + default = "200.0.0.0/24" } variable "vpc_cidr_d" { - description = "CIDR block D" - type = string - default = "192.168.20.0/24" + description = "CIDR block D" + type = string + default = "192.168.20.0/24" } # Trex Dummy CIDRs variable "trex_dummy_cidr_port_0" { - description = "TREX dummy CIDR" - type = string - default = "10.0.0.0/24" + description = "TREX dummy CIDR" + type = string + default = "10.0.0.0/24" } variable "trex_dummy_cidr_port_1" { - description = "TREX dummy CIDR" - type = string - default = "20.0.0.0/24" + description = "TREX dummy CIDR" + type = string + default = "20.0.0.0/24" } # IPs variable "tg_if1_ip" { - description = "TG IP on interface 1" - type = string - default = "192.168.10.254" + description = "TG IP on interface 1" + type = string + default = "192.168.10.254" } variable "tg_if2_ip" { - description = "TG IP on interface 2" - type = string - default = "192.168.20.254" + description = "TG IP on interface 2" + type = string + default = "192.168.20.254" } variable "dut1_if1_ip" { - description = "DUT IP on interface 1" - type = string - default = 
"192.168.10.11" + description = "DUT IP on interface 1" + type = string + default = "192.168.10.11" } variable "dut1_if2_ip" { - description = "DUT IP on interface 2" - type = string - default = "200.0.0.101" + description = "DUT IP on interface 2" + type = string + default = "200.0.0.101" } variable "dut2_if1_ip" { - description = "DUT2 IP on interface 1" - type = string - default = "200.0.0.102" + description = "DUT2 IP on interface 1" + type = string + default = "200.0.0.102" } variable "dut2_if2_ip" { - description = "DUT2 IP on interface 2" - type = string - default = "192.168.20.11" + description = "DUT2 IP on interface 2" + type = string + default = "192.168.20.11" } variable "tg_mgmt_ip" { - description = "TG management interface IP" - type = string - default = "192.168.0.10" + description = "TG management interface IP" + type = string + default = "192.168.0.10" } variable "dut1_mgmt_ip" { - description = "DUT1 management interface IP" - type = string - default = "192.168.0.11" + description = "DUT1 management interface IP" + type = string + default = "192.168.0.11" } variable "dut2_mgmt_ip" { - description = "DUT2 management interface IP" - type = string - default = "192.168.0.12" + description = "DUT2 management interface IP" + type = string + default = "192.168.0.12" } diff --git a/fdio.infra.terraform/3n_aws_c5n/deploy/versions.tf b/fdio.infra.terraform/3n_aws_c5n/deploy/versions.tf index 05fa5502b5..03ae56c8ff 100644 --- a/fdio.infra.terraform/3n_aws_c5n/deploy/versions.tf +++ b/fdio.infra.terraform/3n_aws_c5n/deploy/versions.tf @@ -1,19 +1,19 @@ terraform { required_providers { - aws = { - source = "hashicorp/aws" - version = "~> 3.32.0" + aws = { + source = "hashicorp/aws" + version = "~> 3.32.0" } - null = { - source = "hashicorp/null" - version = "~> 3.0.0" + null = { + source = "hashicorp/null" + version = "~> 3.0.0" } - tls = { - source = "hashicorp/tls" - version = "~> 3.0.0" + tls = { + source = "hashicorp/tls" + version = "~> 3.0.0" } - vault = { - version = ">=2.22.1" + vault = { + version = ">=2.22.1" } } required_version = ">= 1.0.3" diff --git a/fdio.infra.terraform/3n_aws_c5n/main.tf b/fdio.infra.terraform/3n_aws_c5n/main.tf index fca5eecff3..ed87161eb5 100644 --- a/fdio.infra.terraform/3n_aws_c5n/main.tf +++ b/fdio.infra.terraform/3n_aws_c5n/main.tf @@ -1,38 +1,38 @@ module "deploy" { - source = "./deploy" + source = "./deploy" # Parameters starting with var. 
can be set using "TF_VAR_*" environment variables # or -var parameter when running "terraform apply", for default values see ./variables.tf - testbed_name = var.testbed_name - topology_name = "3n_aws_c5n" - environment_name = "CSIT-AWS" - resources_name_prefix = "CSIT_3n_aws_c5n" + testbed_name = var.testbed_name + topology_name = "3n_aws_c5n" + environment_name = "CSIT-AWS" + resources_name_prefix = "CSIT_3n_aws_c5n" # AWS general - region = var.region - avail_zone = var.avail_zone - instance_type = var.instance_type - ami_image = var.ami_image + region = var.region + avail_zone = var.avail_zone + instance_type = var.instance_type + ami_image = var.ami_image # AWS Network - vpc_cidr_mgmt = "192.168.0.0/24" - vpc_cidr_b = "192.168.10.0/24" - vpc_cidr_c = "200.0.0.0/24" - vpc_cidr_d = "192.168.20.0/24" + vpc_cidr_mgmt = "192.168.0.0/24" + vpc_cidr_b = "192.168.10.0/24" + vpc_cidr_c = "200.0.0.0/24" + vpc_cidr_d = "192.168.20.0/24" - tg_mgmt_ip = "192.168.0.10" - dut1_mgmt_ip = "192.168.0.11" - dut2_mgmt_ip = "192.168.0.12" + tg_mgmt_ip = "192.168.0.10" + dut1_mgmt_ip = "192.168.0.11" + dut2_mgmt_ip = "192.168.0.12" - tg_if1_ip = "192.168.10.254" - tg_if2_ip = "192.168.20.254" - dut1_if1_ip = "192.168.10.11" - dut1_if2_ip = "200.0.0.101" - dut2_if1_ip = "200.0.0.102" - dut2_if2_ip = "192.168.20.11" + tg_if1_ip = "192.168.10.254" + tg_if2_ip = "192.168.20.254" + dut1_if1_ip = "192.168.10.11" + dut1_if2_ip = "200.0.0.101" + dut2_if1_ip = "200.0.0.102" + dut2_if2_ip = "192.168.20.11" - trex_dummy_cidr_port_0 = "10.0.0.0/24" - trex_dummy_cidr_port_1 = "20.0.0.0/24" + trex_dummy_cidr_port_0 = "10.0.0.0/24" + trex_dummy_cidr_port_1 = "20.0.0.0/24" # Ansible ansible_python_executable = "/usr/bin/python3" @@ -42,7 +42,7 @@ module "deploy" { # First run # TODO: Remove the testuser creation when added to user_add ansible role - first_run_commands = [ + first_run_commands = [ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config", "sudo systemctl restart sshd", "sudo useradd --create-home -s /bin/bash provisionuser", diff --git a/fdio.infra.terraform/3n_aws_c5n/variables.tf b/fdio.infra.terraform/3n_aws_c5n/variables.tf index d3bb8e41a5..c87b1c3cfa 100644 --- a/fdio.infra.terraform/3n_aws_c5n/variables.tf +++ b/fdio.infra.terraform/3n_aws_c5n/variables.tf @@ -1,37 +1,37 @@ variable "region" { - description = "AWS Region" - type = string - default = "eu-central-1" + description = "AWS Region" + type = string + default = "eu-central-1" } variable "avail_zone" { - description = "AWS availability zone" - type = string - default = "eu-central-1a" + description = "AWS availability zone" + type = string + default = "eu-central-1a" } variable "ami_image" { # eu-central-1/focal-20.04-amd64-hvm-ssd-20210119.1 # kernel 5.4.0-1035-aws (~5.4.0-65) - description = "AWS AMI image ID" - type = string - default = "ami-0a875db8a031a9efb" + description = "AWS AMI image ID" + type = string + default = "ami-0a875db8a031a9efb" } variable "instance_initiated_shutdown_behavior" { - description = "Shutdown behavior for the instance" - type = string - default = "terminate" + description = "Shutdown behavior for the instance" + type = string + default = "terminate" } variable "instance_type" { - description = "AWS instance type" - type = string - default = "c5n.9xlarge" + description = "AWS instance type" + type = string + default = "c5n.9xlarge" } variable "testbed_name" { - description = "Testbed name" - type = string - default = "testbed1" + description = "Testbed name" + type = string + 
default = "testbed1" } diff --git a/fdio.infra.terraform/3n_azure_fsv2/main.tf b/fdio.infra.terraform/3n_azure_fsv2/main.tf index 87d02aa923..f84f521ecd 100644 --- a/fdio.infra.terraform/3n_azure_fsv2/main.tf +++ b/fdio.infra.terraform/3n_azure_fsv2/main.tf @@ -5,44 +5,44 @@ provider "azurerm" { # Variables variable "vpc_addr_space_a" { - type = string + type = string default = "172.16.0.0/16" } variable "vpc_cidr_a" { - type = string + type = string default = "172.16.0.0/24" } variable "vpc_cidr_b" { - type = string + type = string default = "172.16.10.0/24" } variable "vpc_cidr_c" { - type = string + type = string default = "172.16.200.0/24" } variable "vpc_cidr_d" { - type = string + type = string default = "172.16.20.0/24" } variable "trex_dummy_cidr_port_0" { - type = string + type = string default = "172.16.11.0/24" } variable "trex_dummy_cidr_port_1" { - type = string + type = string default = "172.16.21.0/24" } # Create resource group and resources resource "azurerm_resource_group" "CSIT" { - name = "CSIT" + name = "CSIT" #location = "East US" location = "UK South" } @@ -51,8 +51,8 @@ resource "azurerm_virtual_network" "CSIT" { name = "CSIT-network" resource_group_name = azurerm_resource_group.CSIT.name location = azurerm_resource_group.CSIT.location - address_space = [ var.vpc_addr_space_a ] - depends_on = [ azurerm_resource_group.CSIT ] + address_space = [var.vpc_addr_space_a] + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_subnet" "a" { @@ -60,7 +60,7 @@ resource "azurerm_subnet" "a" { resource_group_name = azurerm_resource_group.CSIT.name virtual_network_name = azurerm_virtual_network.CSIT.name address_prefix = var.vpc_cidr_a - depends_on = [ azurerm_resource_group.CSIT ] + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_subnet" "b" { @@ -68,7 +68,7 @@ resource "azurerm_subnet" "b" { resource_group_name = azurerm_resource_group.CSIT.name virtual_network_name = azurerm_virtual_network.CSIT.name address_prefix = var.vpc_cidr_b - depends_on = [ azurerm_resource_group.CSIT ] + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_subnet" "c" { @@ -76,7 +76,7 @@ resource "azurerm_subnet" "c" { resource_group_name = azurerm_resource_group.CSIT.name virtual_network_name = azurerm_virtual_network.CSIT.name address_prefix = var.vpc_cidr_c - depends_on = [ azurerm_resource_group.CSIT ] + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_subnet" "d" { @@ -84,7 +84,7 @@ resource "azurerm_subnet" "d" { resource_group_name = azurerm_resource_group.CSIT.name virtual_network_name = azurerm_virtual_network.CSIT.name address_prefix = var.vpc_cidr_d - depends_on = [ azurerm_resource_group.CSIT ] + depends_on = [azurerm_resource_group.CSIT] } # Create a security group of the Kiknos instances @@ -154,175 +154,175 @@ resource "azurerm_network_security_group" "CSIT" { # Create public IPs resource "azurerm_public_ip" "tg_public_ip" { - name = "tg_public_ip" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - allocation_method = "Dynamic" - depends_on = [ azurerm_resource_group.CSIT ] + name = "tg_public_ip" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + allocation_method = "Dynamic" + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_public_ip" "dut1_public_ip" { - name = "dut1_public_ip" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - allocation_method = 
"Dynamic" - depends_on = [ azurerm_resource_group.CSIT ] + name = "dut1_public_ip" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + allocation_method = "Dynamic" + depends_on = [azurerm_resource_group.CSIT] } resource "azurerm_public_ip" "dut2_public_ip" { - name = "dut2_public_ip" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - allocation_method = "Dynamic" - depends_on = [ azurerm_resource_group.CSIT ] + name = "dut2_public_ip" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + allocation_method = "Dynamic" + depends_on = [azurerm_resource_group.CSIT] } # Create network interface resource "azurerm_network_interface" "tg_mng" { - name = "tg_mng" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - ip_configuration { - primary = "true" - name = "tg_mng_ip" - subnet_id = azurerm_subnet.a.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.0.10" - public_ip_address_id = azurerm_public_ip.tg_public_ip.id - } - depends_on = [ azurerm_resource_group.CSIT, - azurerm_subnet.a, - azurerm_public_ip.tg_public_ip ] + name = "tg_mng" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + ip_configuration { + primary = "true" + name = "tg_mng_ip" + subnet_id = azurerm_subnet.a.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.0.10" + public_ip_address_id = azurerm_public_ip.tg_public_ip.id + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.a, + azurerm_public_ip.tg_public_ip] } resource "azurerm_network_interface" "dut1_mng" { - name = "dut1_mng" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - ip_configuration { - primary = "true" - name = "dut1_mng_ip" - subnet_id = azurerm_subnet.a.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.0.11" - public_ip_address_id = azurerm_public_ip.dut1_public_ip.id - } - depends_on = [ azurerm_resource_group.CSIT, - azurerm_subnet.a, - azurerm_public_ip.dut1_public_ip ] + name = "dut1_mng" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + ip_configuration { + primary = "true" + name = "dut1_mng_ip" + subnet_id = azurerm_subnet.a.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.0.11" + public_ip_address_id = azurerm_public_ip.dut1_public_ip.id + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.a, + azurerm_public_ip.dut1_public_ip] } resource "azurerm_network_interface" "dut2_mng" { - name = "dut2_mng" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - ip_configuration { - primary = "true" - name = "dut2_mng_ip" - subnet_id = azurerm_subnet.a.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.0.12" - public_ip_address_id = azurerm_public_ip.dut2_public_ip.id - } - depends_on = [ azurerm_resource_group.CSIT, - 
azurerm_subnet.a, - azurerm_public_ip.dut2_public_ip ] + name = "dut2_mng" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + ip_configuration { + primary = "true" + name = "dut2_mng_ip" + subnet_id = azurerm_subnet.a.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.0.12" + public_ip_address_id = azurerm_public_ip.dut2_public_ip.id + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.a, + azurerm_public_ip.dut2_public_ip] } resource "azurerm_route_table" "b" { - name = "b" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_resource_group.CSIT, - azurerm_subnet.b ] + name = "b" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.b] disable_bgp_route_propagation = false route { - name = "route-10" - address_prefix = var.trex_dummy_cidr_port_0 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.tg_if1.private_ip_address + name = "route-10" + address_prefix = var.trex_dummy_cidr_port_0 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.tg_if1.private_ip_address } route { - name = "route-20" - address_prefix = var.trex_dummy_cidr_port_1 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address + name = "route-20" + address_prefix = var.trex_dummy_cidr_port_1 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address } route { - name = "tg2" - address_prefix = var.vpc_cidr_d - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address + name = "tg2" + address_prefix = var.vpc_cidr_d + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut1_if1.private_ip_address } } resource "azurerm_route_table" "c" { - name = "c" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_resource_group.CSIT, - azurerm_subnet.c ] + name = "c" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.c] disable_bgp_route_propagation = false route { - name = "route-10" - address_prefix = var.trex_dummy_cidr_port_0 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address + name = "route-10" + address_prefix = var.trex_dummy_cidr_port_0 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address } route { - name = "route-100" - address_prefix = "100.0.0.0/8" - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address + name = "route-100" + address_prefix = "100.0.0.0/8" + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address } route { - name = "route-20" - address_prefix = var.trex_dummy_cidr_port_1 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = 
data.azurerm_network_interface.dut2_if1.private_ip_address + name = "route-20" + address_prefix = var.trex_dummy_cidr_port_1 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address } route { - name = "tg1" - address_prefix = var.vpc_cidr_b - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address + name = "tg1" + address_prefix = var.vpc_cidr_b + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut1_if2.private_ip_address } route { - name = "tg2" - address_prefix = var.vpc_cidr_d - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address + name = "tg2" + address_prefix = var.vpc_cidr_d + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut2_if1.private_ip_address } } resource "azurerm_route_table" "d" { - name = "d" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_resource_group.CSIT, - azurerm_subnet.d ] + name = "d" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + depends_on = [azurerm_resource_group.CSIT, + azurerm_subnet.d] disable_bgp_route_propagation = false route { - name = "route-10" - address_prefix = var.trex_dummy_cidr_port_0 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address + name = "route-10" + address_prefix = var.trex_dummy_cidr_port_0 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address } route { - name = "route-20" - address_prefix = var.trex_dummy_cidr_port_1 - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.tg_if2.private_ip_address + name = "route-20" + address_prefix = var.trex_dummy_cidr_port_1 + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.tg_if2.private_ip_address } route { - name = "tg1" - address_prefix = var.vpc_cidr_b - next_hop_type = "VirtualAppliance" - next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address + name = "tg1" + address_prefix = var.vpc_cidr_b + next_hop_type = "VirtualAppliance" + next_hop_in_ip_address = data.azurerm_network_interface.dut2_if2.private_ip_address } } @@ -342,221 +342,221 @@ resource "azurerm_subnet_route_table_association" "d" { } resource "azurerm_virtual_machine" "tg" { - name = "tg" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - primary_network_interface_id = azurerm_network_interface.tg_mng.id - network_interface_ids = [ azurerm_network_interface.tg_mng.id, - azurerm_network_interface.tg_if1.id, - azurerm_network_interface.tg_if2.id ] - vm_size = "Standard_F32s_v2" - delete_os_disk_on_termination = true - delete_data_disks_on_termination = true - storage_os_disk { - name = "OsDiskTG" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "StandardSSD_LRS" - } - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-LTS" - version = "latest" - } - os_profile { - computer_name = "tg" - admin_username = "ubuntu" - } - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = 
"/home/ubuntu/.ssh/authorized_keys" - key_data = file("~/.ssh/id_rsa.pub") - } + name = "tg" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + primary_network_interface_id = azurerm_network_interface.tg_mng.id + network_interface_ids = [azurerm_network_interface.tg_mng.id, + azurerm_network_interface.tg_if1.id, + azurerm_network_interface.tg_if2.id] + vm_size = "Standard_F32s_v2" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + storage_os_disk { + name = "OsDiskTG" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" + } + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } + os_profile { + computer_name = "tg" + admin_username = "ubuntu" + } + os_profile_linux_config { + disable_password_authentication = true + ssh_keys { + path = "/home/ubuntu/.ssh/authorized_keys" + key_data = file("~/.ssh/id_rsa.pub") } - depends_on = [ azurerm_resource_group.CSIT, - azurerm_network_interface.tg_mng ] + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_network_interface.tg_mng] } resource "azurerm_virtual_machine" "dut1" { - name = "dut1" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - primary_network_interface_id = azurerm_network_interface.dut1_mng.id - network_interface_ids = [ azurerm_network_interface.dut1_mng.id, - azurerm_network_interface.dut1_if1.id, - azurerm_network_interface.dut1_if2.id ] - vm_size = "Standard_F32s_v2" - delete_os_disk_on_termination = true - delete_data_disks_on_termination = true - storage_os_disk { - name = "OsDiskDUT1" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "StandardSSD_LRS" - } - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-LTS" - version = "latest" - } - os_profile { - computer_name = "dut1" - admin_username = "ubuntu" - } - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = "/home/ubuntu/.ssh/authorized_keys" - key_data = file("~/.ssh/id_rsa.pub") - } + name = "dut1" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + primary_network_interface_id = azurerm_network_interface.dut1_mng.id + network_interface_ids = [azurerm_network_interface.dut1_mng.id, + azurerm_network_interface.dut1_if1.id, + azurerm_network_interface.dut1_if2.id] + vm_size = "Standard_F32s_v2" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + storage_os_disk { + name = "OsDiskDUT1" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" + } + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } + os_profile { + computer_name = "dut1" + admin_username = "ubuntu" + } + os_profile_linux_config { + disable_password_authentication = true + ssh_keys { + path = "/home/ubuntu/.ssh/authorized_keys" + key_data = file("~/.ssh/id_rsa.pub") } - depends_on = [ azurerm_resource_group.CSIT, - azurerm_network_interface.dut1_mng ] + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_network_interface.dut1_mng] } resource "azurerm_virtual_machine" "dut2" { - name = "dut2" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - primary_network_interface_id = 
azurerm_network_interface.dut2_mng.id - network_interface_ids = [ azurerm_network_interface.dut2_mng.id, - azurerm_network_interface.dut2_if1.id, - azurerm_network_interface.dut2_if2.id ] - vm_size = "Standard_F32s_v2" - delete_os_disk_on_termination = true - delete_data_disks_on_termination = true - storage_os_disk { - name = "OsDiskDUT2" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "StandardSSD_LRS" - } - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-LTS" - version = "latest" - } - os_profile { - computer_name = "dut2" - admin_username = "ubuntu" - } - os_profile_linux_config { - disable_password_authentication = true - ssh_keys { - path = "/home/ubuntu/.ssh/authorized_keys" - key_data = file("~/.ssh/id_rsa.pub") - } + name = "dut2" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + primary_network_interface_id = azurerm_network_interface.dut2_mng.id + network_interface_ids = [azurerm_network_interface.dut2_mng.id, + azurerm_network_interface.dut2_if1.id, + azurerm_network_interface.dut2_if2.id] + vm_size = "Standard_F32s_v2" + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true + storage_os_disk { + name = "OsDiskDUT2" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "StandardSSD_LRS" + } + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } + os_profile { + computer_name = "dut2" + admin_username = "ubuntu" + } + os_profile_linux_config { + disable_password_authentication = true + ssh_keys { + path = "/home/ubuntu/.ssh/authorized_keys" + key_data = file("~/.ssh/id_rsa.pub") } - depends_on = [ azurerm_resource_group.CSIT, - azurerm_network_interface.dut2_mng ] + } + depends_on = [azurerm_resource_group.CSIT, + azurerm_network_interface.dut2_mng] } data "azurerm_public_ip" "tg_public_ip" { name = "tg_public_ip" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.tg ] + depends_on = [azurerm_virtual_machine.tg] } data "azurerm_public_ip" "dut1_public_ip" { name = "dut1_public_ip" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut1 ] + depends_on = [azurerm_virtual_machine.dut1] } data "azurerm_public_ip" "dut2_public_ip" { name = "dut2_public_ip" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut2 ] + depends_on = [azurerm_virtual_machine.dut2] } # Provisioning resource "null_resource" "deploy_tg" { - depends_on = [ azurerm_virtual_machine.tg, - azurerm_network_interface.tg_if1, - azurerm_network_interface.tg_if2 ] + depends_on = [azurerm_virtual_machine.tg, + azurerm_network_interface.tg_if1, + azurerm_network_interface.tg_if2] connection { - user = "ubuntu" - host = data.azurerm_public_ip.tg_public_ip.ip_address + user = "ubuntu" + host = data.azurerm_public_ip.tg_public_ip.ip_address private_key = file("~/.ssh/id_rsa") } provisioner "ansible" { plays { playbook { - file_path = "../../testbed-setup/ansible/site.yaml" + file_path = "../../testbed-setup/ansible/site.yaml" force_handlers = true } hosts = ["tg_azure"] extra_vars = { ansible_python_interpreter = "/usr/bin/python3" - azure = true + azure = true } } } } resource "null_resource" "deploy_dut1" { - depends_on = [ azurerm_virtual_machine.dut1, - azurerm_network_interface.dut1_if1, - azurerm_network_interface.dut1_if2 ] + depends_on = 
[azurerm_virtual_machine.dut1, + azurerm_network_interface.dut1_if1, + azurerm_network_interface.dut1_if2] connection { - user = "ubuntu" - host = data.azurerm_public_ip.dut1_public_ip.ip_address + user = "ubuntu" + host = data.azurerm_public_ip.dut1_public_ip.ip_address private_key = file("~/.ssh/id_rsa") } provisioner "ansible" { plays { playbook { - file_path = "../../testbed-setup/ansible/site.yaml" + file_path = "../../testbed-setup/ansible/site.yaml" force_handlers = true } hosts = ["sut_azure"] extra_vars = { ansible_python_interpreter = "/usr/bin/python3" - azure = true + azure = true } } } } resource "null_resource" "deploy_dut2" { - depends_on = [ azurerm_virtual_machine.dut2, - azurerm_network_interface.dut2_if1, - azurerm_network_interface.dut2_if2 ] + depends_on = [azurerm_virtual_machine.dut2, + azurerm_network_interface.dut2_if1, + azurerm_network_interface.dut2_if2] connection { - user = "ubuntu" - host = data.azurerm_public_ip.dut2_public_ip.ip_address + user = "ubuntu" + host = data.azurerm_public_ip.dut2_public_ip.ip_address private_key = file("~/.ssh/id_rsa") } provisioner "ansible" { plays { playbook { - file_path = "../../testbed-setup/ansible/site.yaml" + file_path = "../../testbed-setup/ansible/site.yaml" force_handlers = true } hosts = ["sut_azure"] extra_vars = { ansible_python_interpreter = "/usr/bin/python3" - azure = true + azure = true } } } } resource "null_resource" "deploy_topology" { - depends_on = [ azurerm_virtual_machine.tg, - azurerm_network_interface.tg_if1, - azurerm_network_interface.tg_if2, - azurerm_virtual_machine.dut1, - azurerm_network_interface.dut1_if1, - azurerm_network_interface.dut1_if2, - azurerm_virtual_machine.dut2, - azurerm_network_interface.dut2_if1, - azurerm_network_interface.dut2_if2 ] + depends_on = [azurerm_virtual_machine.tg, + azurerm_network_interface.tg_if1, + azurerm_network_interface.tg_if2, + azurerm_virtual_machine.dut1, + azurerm_network_interface.dut1_if1, + azurerm_network_interface.dut1_if2, + azurerm_virtual_machine.dut2, + azurerm_network_interface.dut2_if1, + azurerm_network_interface.dut2_if2] provisioner "ansible" { plays { playbook { @@ -565,16 +565,16 @@ resource "null_resource" "deploy_topology" { hosts = ["local"] extra_vars = { ansible_python_interpreter = "/usr/bin/python3" - cloud_topology = "3n_azure_Fsv2" - tg_if1_mac = data.azurerm_network_interface.tg_if1.mac_address - tg_if2_mac = data.azurerm_network_interface.tg_if2.mac_address - dut1_if1_mac = data.azurerm_network_interface.dut1_if1.mac_address - dut1_if2_mac = data.azurerm_network_interface.dut1_if2.mac_address - dut2_if1_mac = data.azurerm_network_interface.dut2_if1.mac_address - dut2_if2_mac = data.azurerm_network_interface.dut2_if2.mac_address - tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address - dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address - dut2_public_ip = data.azurerm_public_ip.dut2_public_ip.ip_address + cloud_topology = "3n_azure_Fsv2" + tg_if1_mac = data.azurerm_network_interface.tg_if1.mac_address + tg_if2_mac = data.azurerm_network_interface.tg_if2.mac_address + dut1_if1_mac = data.azurerm_network_interface.dut1_if1.mac_address + dut1_if2_mac = data.azurerm_network_interface.dut1_if2.mac_address + dut2_if1_mac = data.azurerm_network_interface.dut2_if1.mac_address + dut2_if2_mac = data.azurerm_network_interface.dut2_if2.mac_address + tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address + dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address + dut2_public_ip = 
data.azurerm_public_ip.dut2_public_ip.ip_address } } } diff --git a/fdio.infra.terraform/3n_azure_fsv2/nic.tf b/fdio.infra.terraform/3n_azure_fsv2/nic.tf index 51692593c6..0bc9e900a7 100644 --- a/fdio.infra.terraform/3n_azure_fsv2/nic.tf +++ b/fdio.infra.terraform/3n_azure_fsv2/nic.tf @@ -1,133 +1,133 @@ # Create a network interface for the data-plane traffic resource "azurerm_network_interface" "dut1_if2" { - name = "dut1_if2" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "dut1_if2" - subnet_id = azurerm_subnet.c.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.200.101" - } + name = "dut1_if2" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "dut1_if2" + subnet_id = azurerm_subnet.c.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.200.101" + } } data "azurerm_network_interface" "dut1_if2" { name = "dut1_if2" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut1 ] + depends_on = [azurerm_virtual_machine.dut1] } resource "azurerm_network_interface" "dut2_if1" { - name = "dut2_if1" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "dut2_if1" - subnet_id = azurerm_subnet.c.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.200.102" - } + name = "dut2_if1" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "dut2_if1" + subnet_id = azurerm_subnet.c.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.200.102" + } } data "azurerm_network_interface" "dut2_if1" { name = "dut2_if1" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut2 ] + depends_on = [azurerm_virtual_machine.dut2] } resource "azurerm_network_interface" "dut1_if1" { - name = "dut1_if1" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "dut1_if1" - subnet_id = azurerm_subnet.b.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.10.11" - } + name = "dut1_if1" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "dut1_if1" + subnet_id = azurerm_subnet.b.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.10.11" + } } data 
"azurerm_network_interface" "dut1_if1" { name = "dut1_if1" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut1 ] + depends_on = [azurerm_virtual_machine.dut1] } resource "azurerm_network_interface" "dut2_if2" { - name = "dut2_if2" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "dut2_if2" - subnet_id = azurerm_subnet.d.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.20.11" - } + name = "dut2_if2" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "dut2_if2" + subnet_id = azurerm_subnet.d.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.20.11" + } } data "azurerm_network_interface" "dut2_if2" { name = "dut2_if2" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.dut2 ] + depends_on = [azurerm_virtual_machine.dut2] } resource "azurerm_network_interface" "tg_if1" { - name = "tg_if1" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "tg1" - subnet_id = azurerm_subnet.b.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.10.250" - } + name = "tg_if1" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "tg1" + subnet_id = azurerm_subnet.b.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.10.250" + } } data "azurerm_network_interface" "tg_if1" { name = "tg_if1" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.tg ] + depends_on = [azurerm_virtual_machine.tg] } resource "azurerm_network_interface" "tg_if2" { - name = "tg_if2" - location = azurerm_resource_group.CSIT.location - resource_group_name = azurerm_resource_group.CSIT.name - network_security_group_id = azurerm_network_security_group.CSIT.id - enable_ip_forwarding = "true" - enable_accelerated_networking = "true" - - ip_configuration { - name = "tg2" - subnet_id = azurerm_subnet.d.id - private_ip_address_allocation = "Static" - private_ip_address = "172.16.20.250" - } + name = "tg_if2" + location = azurerm_resource_group.CSIT.location + resource_group_name = azurerm_resource_group.CSIT.name + network_security_group_id = azurerm_network_security_group.CSIT.id + enable_ip_forwarding = "true" + enable_accelerated_networking = "true" + + ip_configuration { + name = "tg2" + subnet_id = azurerm_subnet.d.id + private_ip_address_allocation = "Static" + private_ip_address = "172.16.20.250" + } } data "azurerm_network_interface" "tg_if2" { name = "tg_if2" resource_group_name = azurerm_resource_group.CSIT.name - depends_on = [ azurerm_virtual_machine.tg ] + depends_on = [azurerm_virtual_machine.tg] } |