author    pmikus <peter.mikus@protonmail.ch>  2024-01-25 07:37:31 +0000
committer Peter Mikus <peter.mikus@protonmail.ch>  2024-01-25 07:44:34 +0000
commit    6067078db338b930972ad6f183a4c75d2cf9214a (patch)
tree      eb93118dc9c6a2b3ee742a5e4914e061ec581c65
parent    7b03d44e6761ee27a0fead3401f9b2d04d95f709 (diff)
feat(terraform): Migrating old structures
Signed-off-by: Peter Mikus <peter.mikus@protonmail.ch>
Change-Id: Ib953739b4d34a7f803bff54cec92a2130fcbbc66
-rw-r--r--  fdio.infra.ansible/site.yaml    2
-rw-r--r--  fdio.infra.ansible/sut.yaml    29
-rw-r--r--  fdio.infra.ansible/tg.yaml    29
-rw-r--r--  fdio.infra.terraform/1n_nmd/main.tf    60
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl    73
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl    223
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/main.tf    82
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/outputs.tf    4
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/providers.tf    13
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/variables.tf    170
-rw-r--r--  fdio.infra.terraform/1n_nmd/versions.tf    21
-rw-r--r--  fdio.infra.terraform/1n_nmd/vpp_device/main.tf    21
-rw-r--r--  fdio.infra.terraform/1n_nmd/vpp_device/versions.tf    13
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl (renamed from fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl)    155
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf    15
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf (renamed from fdio.infra.terraform/1n_nmd/providers.tf)    6
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf (renamed from fdio.infra.terraform/1n_nmd/variables.tf)    20
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf    9
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/main.tf    19
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf (renamed from fdio.infra.terraform/1n_nmd/vpp_device/variables.tf)    16
-rw-r--r--  fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf    9
21 files changed, 226 insertions(+), 763 deletions(-)
diff --git a/fdio.infra.ansible/site.yaml b/fdio.infra.ansible/site.yaml
index 9350a2c140..45a090344e 100644
--- a/fdio.infra.ansible/site.yaml
+++ b/fdio.infra.ansible/site.yaml
@@ -6,12 +6,14 @@
- tg
- tg_aws
- tg_azure
+ - tg_openstack
- import_playbook: sut.yaml
tags:
- sut
- sut_aws
- sut_azure
+    - sut_openstack
- import_playbook: vpp_device.yaml
tags:
diff --git a/fdio.infra.ansible/sut.yaml b/fdio.infra.ansible/sut.yaml
index 10fcd6f7b0..57be961ee1 100644
--- a/fdio.infra.ansible/sut.yaml
+++ b/fdio.infra.ansible/sut.yaml
@@ -103,3 +103,32 @@
tags: cleanup
- role: calibration
tags: calibration
+
+- hosts: sut_openstack
+ become: true
+ become_user: root
+ gather_facts: false
+ pre_tasks:
+ - name: Gathering Facts
+ gather_facts:
+ tags:
+ - always
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: python_env
+ tags: python_env
+ - role: docker
+ tags: docker
+ - role: vpp
+ tags: vpp
+ - role: iperf
+ tags: iperf
+ - role: dpdk
+ tags: dpdk
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+    tags: calibration
\ No newline at end of file
diff --git a/fdio.infra.ansible/tg.yaml b/fdio.infra.ansible/tg.yaml
index 6d8a2fbe42..de8706ffd1 100644
--- a/fdio.infra.ansible/tg.yaml
+++ b/fdio.infra.ansible/tg.yaml
@@ -105,3 +105,32 @@
tags: cleanup
- role: calibration
tags: calibration
+
+- hosts: tg_openstack
+ become: true
+ become_user: root
+ gather_facts: false
+ pre_tasks:
+ - name: Gathering Facts
+ gather_facts:
+ tags:
+ - always
+ roles:
+ - role: user_add
+ tags: user_add
+ - role: common
+ tags: common
+ - role: python_env
+ tags: python_env
+ - role: docker
+ tags: docker
+ - role: iperf
+ tags: iperf
+ - role: trex
+ tags: trex
+ - role: ab
+ tags: ab
+ - role: cleanup
+ tags: cleanup
+ - role: calibration
+    tags: calibration
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/main.tf b/fdio.infra.terraform/1n_nmd/main.tf
deleted file mode 100644
index 77701835aa..0000000000
--- a/fdio.infra.terraform/1n_nmd/main.tf
+++ /dev/null
@@ -1,60 +0,0 @@
-# For convenience in simple configurations, a child module automatically
-# inherits default (un-aliased) provider configurations from its parent.
-# This means that explicit provider blocks appear only in the root module,
-# and downstream modules can simply declare resources for that provider
-# and have them automatically associated with the root provider
-# configurations.
-
-#module "minio" {
-# source = "./minio"
-# providers = {
-# nomad = nomad.yul1
-# }
-#
-# # nomad
-# nomad_datacenters = ["yul1"]
-# nomad_host_volume = "prod-volume-data1-1"
-#
-# # minio
-# minio_job_name = "prod-minio"
-# minio_group_count = 4
-# minio_service_name = "storage"
-# minio_host = "http://10.32.8.1{4...7}"
-# minio_port = 9000
-# minio_container_image = "minio/minio:RELEASE.2021-07-27T02-40-15Z"
-# minio_vault_secret = {
-# use_vault_provider = false,
-# vault_kv_policy_name = "kv-secret",
-# vault_kv_path = "secret/data/minio",
-# vault_kv_field_access_key = "access_key",
-# vault_kv_field_secret_key = "secret_key"
-# }
-# minio_data_dir = "/data/"
-# minio_use_host_volume = true
-# minio_use_canary = true
-# minio_envs = ["MINIO_BROWSER=\"off\""]
-#
-# minio_buckets = ["logs.fd.io"]
-#}
-
-data "vault_generic_secret" "minio_creds" {
- path = "kv/secret/data/minio"
-}
-
-module "vpp_device" {
- source = "./vpp_device"
- providers = {
- nomad = nomad.yul1
- }
-
- # nomad
- nomad_datacenters = ["yul1"]
-
- # csit_shim
- csit_shim_job_name = "prod-device-csit-shim"
- csit_shim_group_count = "1"
- csit_shim_cpu = "1500"
- csit_shim_mem = "4096"
- csit_shim_image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
- csit_shim_image_x86_64 = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
-}
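The header comment in the deleted root module describes implicit provider inheritance; the replacement layout introduced later in this diff passes the aliased provider to the child module explicitly instead. A minimal sketch of that pattern (alias name and address taken from elsewhere in this diff):

    provider "nomad" {
      alias   = "yul1"
      address = "http://10.30.51.23:4646"
    }

    module "vpp-device" {
      source = "../"
      providers = {
        nomad = nomad.yul1
      }
    }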
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
deleted file mode 100644
index 238003bb00..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
+++ /dev/null
@@ -1,73 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
- type = "batch"
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-mc" {
- task "prod-task1-create-buckets" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- entrypoint = [
- "/bin/sh",
- "-c",
- "${command}"
- ]
- dns_servers = [ "$${attr.unique.network.ip-address}" ]
- privileged = false
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
- %{ if use_vault_provider }
- {{ with secret "${vault_kv_path}" }}
- MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
- MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
- {{ end }}
- %{ else }
- MINIO_ACCESS_KEY = "${access_key}"
- MINIO_SECRET_KEY = "${secret_key}"
- %{ endif }
- ${ envs }
- }
- }
- }
-}
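The deleted template layers three interpolation syntaxes that are easy to misread: ${...} is expanded by Terraform when the template is rendered, $${...} is an escape that hands a literal ${...} to Nomad for runtime interpolation, and {{ ... }} passes through untouched for Nomad's Vault template integration. A minimal sketch with placeholder names assumed, not part of the commit:

    job "${job_name}" {              # rendered by Terraform
      datacenters = ["yul1"]
      group "example-group" {
        task "example-task" {
          driver = "docker"
          config {
            image = "alpine:3.18"
            # escaped: Nomad receives ${attr.unique.network.ip-address}
            # and resolves it on the client at runtime
            dns_servers = ["$${attr.unique.network.ip-address}"]
          }
          env {
    %{ if use_vault_provider }
    {{ with secret "${vault_kv_path}" }}
            # left for Nomad's Vault integration to resolve at runtime
            SECRET = "{{ .Data.data.key }}"
    {{ end }}
    %{ endif }
          }
        }
      }
    }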
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
deleted file mode 100644
index 3889b51a9f..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
+++ /dev/null
@@ -1,223 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "global"
- #
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = 1
-
- health_check = "checks"
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
-%{ if use_canary }
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = 1
-
- # Specifies if the job should auto-promote to the canary version when all
- # canaries become healthy during a deployment. Defaults to false which means
- # canaries must be manually updated with the nomad deployment promote
- # command.
- auto_promote = true
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = true
-%{ endif }
- }
-
- # All groups in this job should be scheduled on different hosts.
- constraint {
- operator = "distinct_hosts"
- value = "true"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "prod-group1-minio" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- # https://www.nomadproject.io/docs/job-specification/volume
- %{ if use_host_volume }
- volume "prod-volume1-minio" {
- type = "host"
- read_only = false
- source = "${host_volume}"
- }
- %{ endif }
-
- # The restart stanza configures a tasks's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "prod-task1-minio" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "docker"
-
- %{ if use_host_volume }
- volume_mount {
- volume = "prod-volume1-minio"
- destination = "${data_dir}"
- read_only = false
- }
- %{ endif }
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- image = "${image}"
- dns_servers = [ "172.17.0.1" ]
- network_mode = "host"
- command = "server"
- args = [ "${host}:${port}${data_dir}" ]
- port_map {
- http = ${port}
- }
- privileged = false
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
-%{ if use_vault_provider }
-{{ with secret "${vault_kv_path}" }}
- MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
- MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
-{{ end }}
-%{ else }
- MINIO_ACCESS_KEY = "${access_key}"
- MINIO_SECRET_KEY = "${secret_key}"
-%{ endif }
- ${ envs }
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "${service_name}"
- port = "http"
- tags = [ "storage$${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "Min.io Server HTTP Check Live"
- type = "http"
- port = "http"
- protocol = "http"
- method = "GET"
- path = "/minio/health/live"
- interval = "10s"
- timeout = "2s"
- }
- check {
- name = "Min.io Server HTTP Check Ready"
- type = "http"
- port = "http"
- protocol = "http"
- method = "GET"
- path = "/minio/health/ready"
- interval = "10s"
- timeout = "2s"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- # The network stanza specifies the networking requirements for the task
- # group, including the network mode and port allocations. When scheduling
- # jobs in Nomad they are provisioned across your fleet of machines along
- # with other jobs and services. Because you don't know in advance what host
- # your job will be provisioned on, Nomad will provide your tasks with
- # network configuration when they start up.
- #
- # https://www.nomadproject.io/docs/job-specification/network
- #
- network {
- port "http" {
- static = ${port}
- }
- }
- }
- }
- }
-}
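The update stanza above documents blue/green deployments in prose; concretely that means setting "canary" equal to the group count. A short sketch, assuming a group with count = 4:

    update {
      max_parallel = 1
      # deploy a full second set of 4 allocations alongside the old set;
      # promotion then stops the old allocations (blue/green)
      canary       = 4
      auto_promote = true
      auto_revert  = true
    }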
diff --git a/fdio.infra.terraform/1n_nmd/minio/main.tf b/fdio.infra.terraform/1n_nmd/minio/main.tf
deleted file mode 100644
index 6954cc2f05..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/main.tf
+++ /dev/null
@@ -1,82 +0,0 @@
-locals {
- datacenters = join(",", var.nomad_datacenters)
- minio_env_vars = join("\n",
- concat([
- ], var.minio_envs)
- )
- mc_env_vars = join("\n",
- concat([
- ], var.mc_envs)
- )
- mc_formatted_bucket_list = formatlist("LOCALMINIO/%s", var.minio_buckets)
- mc_add_config_command = concat(
- [
- "mc",
- "config",
- "host",
- "add",
- "LOCALMINIO",
- "http://${var.minio_service_name}.service.consul:${var.minio_port}",
- "$MINIO_ACCESS_KEY",
- "$MINIO_SECRET_KEY",
- ])
- mc_create_bucket_command = concat(["mc", "mb", "-p"], local.mc_formatted_bucket_list)
- command = join(" ", concat(local.mc_add_config_command, ["&&"], local.mc_create_bucket_command, [";"], concat(var.mc_extra_commands)))
-}
-
-data "template_file" "nomad_job_minio" {
- template = file("${path.module}/conf/nomad/minio.hcl")
- vars = {
- job_name = var.minio_job_name
- datacenters = local.datacenters
- use_canary = var.minio_use_canary
- group_count = var.minio_group_count
- use_host_volume = var.minio_use_host_volume
- host_volume = var.nomad_host_volume
- service_name = var.minio_service_name
- host = var.minio_host
- port = var.minio_port
- upstreams = jsonencode(var.minio_upstreams)
- cpu_proxy = var.minio_resource_proxy.cpu
- memory_proxy = var.minio_resource_proxy.memory
- use_vault_provider = var.minio_vault_secret.use_vault_provider
- image = var.minio_container_image
- access_key = var.minio_access_key
- secret_key = var.minio_secret_key
- data_dir = var.minio_data_dir
- envs = local.minio_env_vars
- cpu = var.minio_cpu
- memory = var.minio_memory
- }
-}
-
-data "template_file" "nomad_job_mc" {
- template = file("${path.module}/conf/nomad/mc.hcl")
- vars = {
- job_name = var.mc_job_name
- service_name = var.mc_service_name
- datacenters = local.datacenters
- minio_service_name = var.minio_service_name
- minio_port = var.minio_port
- image = var.mc_container_image
- access_key = var.minio_access_key
- secret_key = var.minio_secret_key
- use_vault_provider = var.minio_vault_secret.use_vault_provider
- envs = local.mc_env_vars
- command = local.command
- }
-}
-
-resource "nomad_job" "nomad_job_minio" {
- jobspec = data.template_file.nomad_job_minio.rendered
- detach = false
-}
-
-#resource "nomad_job" "nomad_job_mc" {
-# jobspec = data.template_file.nomad_job_mc.rendered
-# detach = false
-#
-# depends_on = [
-# nomad_job.nomad_job_minio
-# ]
-#}
\ No newline at end of file
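The deleted module still rendered its jobspecs through the archived hashicorp/template provider and its template_file data source. The modules added later in this diff use the built-in templatefile() function instead, which needs no extra provider. A sketch of the equivalent rendering, with the template path and vars map abbreviated and the .tftpl suffix assumed:

    resource "nomad_job" "nomad_job_minio" {
      jobspec = templatefile(
        "${path.module}/conf/nomad/minio.hcl.tftpl",
        {
          job_name    = var.minio_job_name
          datacenters = join(",", var.nomad_datacenters)
        }
      )
      detach = false
    }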
diff --git a/fdio.infra.terraform/1n_nmd/minio/outputs.tf b/fdio.infra.terraform/1n_nmd/minio/outputs.tf
deleted file mode 100644
index 309cd3b9d0..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/outputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-output "minio_service_name" {
- description = "Minio service name"
- value = data.template_file.nomad_job_minio.vars.service_name
-}
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/providers.tf b/fdio.infra.terraform/1n_nmd/minio/providers.tf
deleted file mode 100644
index 1399201d21..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/providers.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-terraform {
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.9"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.1.2"
- }
- }
- required_version = ">= 1.0.3"
-}
diff --git a/fdio.infra.terraform/1n_nmd/minio/variables.tf b/fdio.infra.terraform/1n_nmd/minio/variables.tf
deleted file mode 100644
index ab9d07f0d7..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio/variables.tf
+++ /dev/null
@@ -1,170 +0,0 @@
-# Nomad
-variable "nomad_datacenters" {
- description = "Nomad data centers"
- type = list(string)
- default = ["dc1"]
-}
-
-variable "nomad_host_volume" {
- description = "Nomad Host Volume"
- type = string
- default = "persistence"
-}
-
-# Minio
-variable "minio_job_name" {
- description = "Minio job name"
- type = string
- default = "minio"
-}
-
-variable "minio_service_name" {
- description = "Minio service name"
- type = string
- default = "minio"
-}
-
-variable "minio_group_count" {
- description = "Number of Minio group instances"
- type = number
- default = 1
-}
-
-variable "minio_host" {
- description = "Minio host"
- type = string
- default = "127.0.0.1"
-}
-
-variable "minio_port" {
- description = "Minio port"
- type = number
- default = 9000
-}
-
-variable "minio_cpu" {
- description = "CPU allocation for Minio"
- type = number
- default = 40000
-}
-
-variable "minio_memory" {
- description = "Memory allocation for Minio"
- type = number
- default = 40000
-}
-
-variable "minio_container_image" {
- description = "Minio docker image"
- type = string
- default = "minio/minio:latest"
-}
-
-variable "minio_envs" {
- description = "Minio environment variables"
- type = list(string)
- default = []
-}
-
-variable "minio_access_key" {
- description = "Minio access key"
- type = string
- default = "minio"
-}
-
-variable "minio_secret_key" {
- description = "Minio secret key"
- type = string
- default = "minio123"
-}
-
-variable "minio_data_dir" {
- description = "Minio server data dir"
- type = string
- default = "/data/"
-}
-
-variable "minio_use_host_volume" {
- description = "Use Nomad host volume feature"
- type = bool
- default = false
-}
-
-variable "minio_use_canary" {
- description = "Uses canary deployment for Minio"
- type = bool
- default = false
-}
-
-variable "minio_vault_secret" {
- description = "Set of properties to be able to fetch secret from vault"
- type = object({
- use_vault_provider = bool,
- vault_kv_policy_name = string,
- vault_kv_path = string,
- vault_kv_field_access_key = string,
- vault_kv_field_secret_key = string
- })
-}
-
-variable "minio_resource_proxy" {
- description = "Minio proxy resources"
- type = object({
- cpu = number,
- memory = number
- })
- default = {
- cpu = 200,
- memory = 128
- }
- validation {
- condition = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128
- error_message = "Proxy resource must be at least: cpu=200, memory=128."
- }
-}
-
-# MC
-variable "mc_job_name" {
- description = "Minio client job name"
- type = string
- default = "mc"
-}
-
-variable "mc_service_name" {
- description = "Minio client service name"
- type = string
- default = "mc"
-}
-
-variable "mc_container_image" {
- description = "Minio client docker image"
- type = string
- default = "minio/mc:latest"
-}
-
-variable "mc_envs" {
- description = "Minio client environment variables"
- type = list(string)
- default = []
-}
-
-variable "minio_buckets" {
- description = "List of buckets to create on startup"
- type = list(string)
- default = []
-}
-
-variable "minio_upstreams" {
- description = "List of upstream services (list of object with service_name, port)"
- type = list(object({
- service_name = string,
- port = number,
- }))
- default = []
-}
-
-variable "mc_extra_commands" {
- description = "Extra commands to run in MC container after creating buckets"
- type = list(string)
- default = [""]
-}
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/versions.tf b/fdio.infra.terraform/1n_nmd/versions.tf
deleted file mode 100644
index 556ddbaee4..0000000000
--- a/fdio.infra.terraform/1n_nmd/versions.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-terraform {
- backend "consul" {
- address = "consul.service.consul:8500"
- scheme = "http"
- path = "terraform/nomad"
- }
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.9"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.2.0"
- }
- vault = {
- version = ">=2.14.0"
- }
- }
- required_version = ">= 1.0.3"
-}
\ No newline at end of file
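Removing this versions.tf also removes the Consul state backend, so the old root module's state is abandoned rather than migrated. Within a single configuration, Terraform 1.1+ can record such refactors with a "moved" block; across separate root modules, as in this diff, the resource would instead have to be transferred with "terraform state mv" or simply re-created. A hypothetical sketch of the single-configuration case only:

    # only valid when the old and new addresses live in the same state
    moved {
      from = module.vpp_device.nomad_job.nomad_job_csit_shim
      to   = module.vpp-device.nomad_job.nomad_job_csit_shim
    }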
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf b/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
deleted file mode 100644
index 89b28ce385..0000000000
--- a/fdio.infra.terraform/1n_nmd/vpp_device/main.tf
+++ /dev/null
@@ -1,21 +0,0 @@
-locals {
- datacenters = join(",", var.nomad_datacenters)
-}
-
-data "template_file" "nomad_job_csit_shim" {
- template = file("${path.module}/conf/nomad/csit_shim.hcl")
- vars = {
- datacenters = local.datacenters
- job_name = var.csit_shim_job_name
- group_count = var.csit_shim_group_count
- cpu = var.csit_shim_cpu
- mem = var.csit_shim_mem
- image_aarch64 = var.csit_shim_image_aarch64
- image_x86_64 = var.csit_shim_image_x86_64
- }
-}
-
-resource "nomad_job" "nomad_job_csit_shim" {
- jobspec = data.template_file.nomad_job_csit_shim.rendered
- detach = false
-}
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf b/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf
deleted file mode 100644
index b80610a525..0000000000
--- a/fdio.infra.terraform/1n_nmd/vpp_device/versions.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-terraform {
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.15"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.2.0"
- }
- }
- required_version = ">= 1.0.3"
-}
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl
index 9763088dcd..aac1a46165 100644
--- a/fdio.infra.terraform/1n_nmd/vpp_device/conf/nomad/csit_shim.hcl
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/conf/nomad/vpp-device.hcl.tftpl
@@ -12,51 +12,68 @@ job "${job_name}" {
# "service". For a full list of job types and their differences, please see
# the online documentation.
#
- # For more information, please see the online documentation at:
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers.html
- #
type = "system"
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
#
- # For more information and examples on the "group" stanza, please see
- # the online documentation at:
+ # https://www.nomadproject.io/docs/job-specification/group
#
- # https://www.nomadproject.io/docs/job-specification/group.html
- #
- group "prod-group1-csit-shim-amd" {
+ group "csit-shim-amd-group-1" {
# The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
+ # be running under this group. This value must be non-negative.
+ count = ${group_count}
+ # The constraint allows restricting the set of eligible nodes. Constraints
+ # may filter on attributes or client metadata.
+ #
+ # https://www.nomadproject.io/docs/job-specification/constraint
+ #
constraint {
- attribute = "$${node.class}"
- value = "csit"
+ attribute = "$${node.class}"
+ value = "csit"
}
+  # The restart stanza configures a task's behavior on task failure. Restarts
+ # happen on the client that is running the task.
+ #
+ # https://www.nomadproject.io/docs/job-specification/restart
+ #
restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
+ interval = "1m"
+ attempts = 3
+ delay = "15s"
+ mode = "delay"
+ }
+
+ # The network stanza specifies the networking requirements for the task
+ # group, including the network mode and port allocations. When scheduling
+ # jobs in Nomad they are provisioned across your fleet of machines along
+ # with other jobs and services. Because you don't know in advance what host
+ # your job will be provisioned on, Nomad will provide your tasks with
+ # network configuration when they start up.
+ #
+ # https://www.nomadproject.io/docs/job-specification/network
+ #
+ network {
+ port "ssh" {
+ static = 6022
+ }
+ port "ssh2" {
+ static = 6023
+ }
}
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
+ # https://www.nomadproject.io/docs/job-specification/task
#
- task "prod-task1-csit-shim-amd" {
+ task "csit-shim-amd-task-1" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
- driver = "docker"
+ driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
@@ -77,56 +94,69 @@ job "${job_name}" {
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
+ # https://www.nomadproject.io/docs/job-specification/resources
#
resources {
- cpu = ${cpu}
- memory = ${mem}
- network {
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
+ cpu = ${cpu}
+ memory = ${memory}
}
}
}
- group "prod-group1-csit-shim-arm" {
+ group "csit-shim-arm-group-1" {
# The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
+ # be running under this group. This value must be non-negative.
+ count = ${group_count}
+ # The constraint allows restricting the set of eligible nodes. Constraints
+ # may filter on attributes or client metadata.
+ #
+ # https://www.nomadproject.io/docs/job-specification/constraint
+ #
constraint {
- attribute = "$${node.class}"
- value = "csitarm"
+ attribute = "$${node.class}"
+ value = "csitarm"
}
+  # The restart stanza configures a task's behavior on task failure. Restarts
+ # happen on the client that is running the task.
+ #
+ # https://www.nomadproject.io/docs/job-specification/restart
+ #
restart {
- interval = "1m"
- attempts = 3
- delay = "15s"
- mode = "delay"
+ interval = "1m"
+ attempts = 3
+ delay = "15s"
+ mode = "delay"
+ }
+
+ # The network stanza specifies the networking requirements for the task
+ # group, including the network mode and port allocations. When scheduling
+ # jobs in Nomad they are provisioned across your fleet of machines along
+ # with other jobs and services. Because you don't know in advance what host
+ # your job will be provisioned on, Nomad will provide your tasks with
+ # network configuration when they start up.
+ #
+ # https://www.nomadproject.io/docs/job-specification/network
+ #
+ network {
+ port "ssh" {
+ static = 6022
+ }
+ port "ssh2" {
+ static = 6023
+ }
}
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
#
- # For more information and examples on the "task" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
+ # https://www.nomadproject.io/docs/job-specification/task
#
- task "prod-task1-csit-shim-arm" {
+ task "csit-shim-arm-task-1" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
- driver = "docker"
+ driver = "docker"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
@@ -147,22 +177,11 @@ job "${job_name}" {
# This ensures the task will execute on a machine that contains enough
# resource capacity.
#
- # For more information and examples on the "resources" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/resources.html
+ # https://www.nomadproject.io/docs/job-specification/resources
#
resources {
- cpu = ${cpu}
- memory = ${mem}
- network {
- port "ssh" {
- static = 6022
- }
- port "ssh2" {
- static = 6023
- }
- }
+ cpu = ${cpu}
+ memory = ${memory}
}
}
}
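Besides renaming the groups and tasks, the template migration above moves port allocation from the task-level resources.network/port_map syntax (deprecated in Nomad 0.12 and removed in later releases) to a group-level network stanza. With that layout the docker driver maps ports by label; a sketch, with the image name assumed:

    group "csit-shim-amd-group-1" {
      network {
        port "ssh" {
          static = 6022
        }
      }
      task "csit-shim-amd-task-1" {
        driver = "docker"
        config {
          image = "fdiotools/csit_shim-ubuntu2004:prod-x86_64"
          ports = ["ssh"]
        }
      }
    }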
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
new file mode 100644
index 0000000000..96666e6e89
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/main.tf
@@ -0,0 +1,15 @@
+module "vpp-device" {
+ providers = {
+ nomad = nomad.yul1
+ }
+ source = "../"
+
+ # nomad
+ datacenters = ["yul1"]
+ job_name = "prod-device-csit-shim"
+ group_count = "1"
+ cpu = "1500"
+ memory = "4096"
+ image_aarch64 = "fdiotools/csit_shim-ubuntu2004:2021_03_02_143938_UTC-aarch64"
+ image_x86_64 = "fdiotools/csit_shim-ubuntu2004:2021_03_04_142103_UTC-x86_64"
+}
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/providers.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf
index 92ddb553e7..42a6a45ce0 100644
--- a/fdio.infra.terraform/1n_nmd/providers.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/providers.tf
@@ -7,7 +7,7 @@ provider "nomad" {
}
provider "vault" {
- address = "http://10.30.51.28:8200"
- skip_tls_verify = true
- token = var.token
+ address = var.vault_provider_address
+ skip_tls_verify = var.vault_provider_skip_tls_verify
+ token = var.vault_provider_token
} \ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/variables.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf
index 598770eb13..569ba29c87 100644
--- a/fdio.infra.terraform/1n_nmd/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/variables.tf
@@ -1,5 +1,5 @@
variable "nomad_acl" {
- description = "Nomad ACLs enabled/disabled"
+ description = "Nomad ACLs enabled/disabled."
type = bool
default = false
}
@@ -7,7 +7,7 @@ variable "nomad_acl" {
variable "nomad_provider_address" {
description = "FD.io Nomad cluster address."
type = string
- default = "http://nomad.service.consul:4646"
+ default = "http://10.30.51.23:4646"
}
variable "nomad_provider_ca_file" {
@@ -28,8 +28,20 @@ variable "nomad_provider_key_file" {
default = "/etc/nomad.d/ssl/nomad-cli-key.pem"
}
-variable "token" {
- description = "Vault root token"
+variable "vault_provider_address" {
+ description = "Vault cluster address."
+ type = string
+ default = "http://10.30.51.23:8200"
+}
+
+variable "vault_provider_skip_tls_verify" {
+ description = "Verification of the Vault server's TLS certificate."
+ type = bool
+ default = false
+}
+
+variable "vault_provider_token" {
+ description = "Vault root token."
type = string
sensitive = true
} \ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
new file mode 100644
index 0000000000..82b792fd72
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/fdio/versions.tf
@@ -0,0 +1,9 @@
+terraform {
+ required_providers {
+ nomad = {
+ source = "hashicorp/nomad"
+ version = ">= 1.4.19"
+ }
+ }
+ required_version = ">= 1.3.7"
+}
\ No newline at end of file
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
new file mode 100644
index 0000000000..2e67085714
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/main.tf
@@ -0,0 +1,19 @@
+locals {
+ datacenters = join(",", var.datacenters)
+}
+
+resource "nomad_job" "nomad_job_csit_shim" {
+ jobspec = templatefile(
+ "${path.module}/conf/nomad/vpp-device.hcl.tftpl",
+ {
+ datacenters = local.datacenters
+ job_name = var.job_name
+ group_count = var.group_count
+ cpu = var.cpu
+      memory        = var.memory
+ image_aarch64 = var.image_aarch64
+ image_x86_64 = var.image_x86_64
+ }
+ )
+ detach = false
+}
\ No newline at end of file
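templatefile() fails at plan time if the vars map omits any key the template references, which is why the map above must use the key names exactly as they appear in vpp-device.hcl.tftpl ("memory", not "mem"). A hypothetical debugging aid, not part of the commit, that renders the jobspec into an output for inspection:

    output "rendered_jobspec" {
      value = templatefile(
        "${path.module}/conf/nomad/vpp-device.hcl.tftpl",
        {
          datacenters   = "yul1"
          job_name      = "prod-device-csit-shim"
          group_count   = 1
          cpu           = 1500
          memory        = 4096
          image_aarch64 = "fdiotools/csit_shim-ubuntu2004:prod-aarch64"
          image_x86_64  = "fdiotools/csit_shim-ubuntu2004:prod-x86_64"
        }
      )
    }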
diff --git a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf
index 401be66f27..0a11e1da3b 100644
--- a/fdio.infra.terraform/1n_nmd/vpp_device/variables.tf
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/variables.tf
@@ -1,42 +1,42 @@
# Nomad
-variable "nomad_datacenters" {
- description = "Nomad data centers"
+variable "datacenters" {
+  description = "Specifies the list of DCs to be considered when placing this task."
type = list(string)
default = ["dc1"]
}
# CSIT SHIM
-variable "csit_shim_job_name" {
+variable "job_name" {
description = "CSIT SHIM job name"
type = string
default = "prod-csit-shim"
}
-variable "csit_shim_group_count" {
+variable "group_count" {
description = "Number of CSIT SHIM group instances"
type = number
default = 1
}
-variable "csit_shim_cpu" {
+variable "cpu" {
description = "CSIT SHIM task CPU"
type = number
default = 2000
}
-variable "csit_shim_mem" {
+variable "memory" {
description = "CSIT SHIM task memory"
type = number
default = 10000
}
-variable "csit_shim_image_aarch64" {
+variable "image_aarch64" {
description = "CSIT SHIM AARCH64 docker image"
type = string
default = "fdiotools/csit_shim-ubuntu2004:prod-aarch64"
}
-variable "csit_shim_image_x86_64" {
+variable "image_x86_64" {
description = "CSIT SHIM X86_64 docker image"
type = string
default = "fdiotools/csit_shim-ubuntu2004:prod-x86_64"
diff --git a/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
new file mode 100644
index 0000000000..a319c35908
--- /dev/null
+++ b/fdio.infra.terraform/terraform-nomad-vpp-device/versions.tf
@@ -0,0 +1,9 @@
+terraform {
+ required_providers {
+ nomad = {
+ source = "hashicorp/nomad"
+ version = ">= 1.4.19"
+ }
+ }
+ required_version = ">= 1.3.7"
+}