author    | pmikus <pmikus@cisco.com> | 2020-12-09 20:11:42 +0000
committer | Peter Mikus <pmikus@cisco.com> | 2020-12-12 07:59:30 +0000
commit    | fd4d85865e145f12330a4266be48fbdd6e919cf4 (patch)
tree      | a94252782163c250a29a0551ba48df2e023d0692
parent    | 688a68a8f6d8a69a85cb76421a16dff9c4105c52 (diff)
Refactor storage solution
+ Minio terraform module
+ XL mode enabled with erasure code
+ Upload script as a sample
+ Nginx terraform module
+ Updating ansible to reflect changes
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: Ia8c439b749aa0de82bd6f1d0cfbecce4d7000a8f
37 files changed, 1501 insertions, 230 deletions
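The "XL mode" bullet refers to MinIO's distributed, erasure-coded server mode: every server is started with the full peer set, written in MinIO's brace-ellipsis notation. As a trimmed sketch of the task config the minio module renders (values taken from this change; the full rendering appears in terraform.tfstate below):

  config {
    image        = "minio/minio:RELEASE.2020-12-03T05-49-24Z"
    dns_servers  = [ "${attr.unique.network.ip-address}" ]
    network_mode = "host"
    command      = "server"
    # {4...7} expands to the four peers 10.32.8.14-10.32.8.17; four
    # endpoints is the minimum MinIO accepts for erasure-coded mode.
    args         = [ "http://10.32.8.1{4...7}:9000/data/" ]
    port_map {
      http = 9000
    }
    privileged   = false
  }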
diff --git a/resources/tools/terraform/1n_nmd/main.tf b/resources/tools/terraform/1n_nmd/main.tf
deleted file mode 100644
index 330f647476..0000000000
--- a/resources/tools/terraform/1n_nmd/main.tf
+++ /dev/null
@@ -1,40 +0,0 @@
-terraform {
-  # This module is now only being tested with Terraform 0.13.5+.
-  required_version = ">= 0.13.5"
-}
-
-provider "nomad" {
-  address = var.nomad_provider_address
-  alias   = "yul1"
-}
-
-# For convenience in simple configurations, a child module automatically
-# inherits default (un-aliased) provider configurations from its parent.
-# This means that explicit provider blocks appear only in the root module,
-# and downstream modules can simply declare resources for that provider
-# and have them automatically associated with the root provider
-# configurations.
-
-# prod_storage
-# + prod-group1-nginx
-# + prod-group1-storage
-# + services
-#   + docs.nginx.service.consul
-#   + logs.nginx.service.consul
-#   + storage.nginx.service.consul
-module "prod_storage" {
-  source = "./prod_storage"
-  providers = {
-    nomad = nomad.yul1
-  }
-}
-
-# prod_vpp_device
-# + prod-csit-shim-amd
-# + prod-csit-shim-arm
-module "prod_vpp_device" {
-  source = "./prod_vpp_device"
-  providers = {
-    nomad = nomad.yul1
-  }
-}
\ No newline at end of file
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/resources.tf b/resources/tools/terraform/1n_nmd/prod_storage/resources.tf
deleted file mode 100644
index 4c42927be6..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_storage/resources.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-resource "nomad_job" "prod_nginx" {
-  provider = nomad
-  jobspec  = file("${path.module}/prod-nginx.nomad")
-}
-
-resource "nomad_job" "prod_storage" {
-  provider = nomad
-  jobspec  = file("${path.module}/prod-storage.nomad")
-}
\ No newline at end of file
diff --git a/resources/tools/terraform/3n_azure_fsv2/.gitignore b/resources/tools/terraform/3n_azure_fsv2/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/3n_azure_fsv2/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml
index 4849733e18..dd2a2f5b95 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.14.yaml
@@ -34,6 +34,7 @@ nomad_options:
   driver.raw_exec.enable: 1
   docker.cleanup.image: true
   docker.privileged.enabled: true
+  docker.volumes.enabled: true
   driver.whitelist: "docker,raw_exec,exec"
   fingerprint.network.disallow_link_local: true
 nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ]
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml
index e0058185cc..7e103cf8c7 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.15.yaml
@@ -34,6 +34,7 @@ nomad_options:
   driver.raw_exec.enable: 1
   docker.cleanup.image: true
   docker.privileged.enabled: true
+  docker.volumes.enabled: true
   driver.whitelist: "docker,raw_exec,exec"
   fingerprint.network.disallow_link_local: true
 nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ]
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml
index 893b33e5bd..e1b47cae15 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.16.yaml
@@ -34,6 +34,7 @@ nomad_options:
   driver.raw_exec.enable: 1
   docker.cleanup.image: true
   docker.privileged.enabled: true
+  docker.volumes.enabled: true
   driver.whitelist: "docker,raw_exec,exec"
   fingerprint.network.disallow_link_local: true
 nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ]
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml
new file mode 100644
index 0000000000..6a4e238bdc
--- /dev/null
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml
@@ -0,0 +1,59 @@
+---
+# file: host_vars/10.32.8.17.yaml
+
+hostname: "s57-nomad"
+inventory_ipmi_hostname: "10.30.55.17"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+  - username: testuser
+    groups: [adm, sudo]
+    password: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+    ssh_key:
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAgObJFDIMmPwQhhkjAynvlbwpM5yeSewyaE7vTLaFf4uFz4vmsE2hFf6B2xXHUGLVwoVfk91UeK7LOGrdDpoDDHzvPZXj5NmZI+WiWax5y2pQZNkcSZws0ENCeEc4hPwc4veJ1JmhokF4Bsmu14HyFMaFUhM8897jtJwsh+9fLA/no0iPGaQqEtRUQhkV+P4jCEPoY0qdRZAzVw/rY4EGAMhsJe3EJmyj63OfrrkG3+hvSLFo5pDxHQr3pZd/c6ukI7xMef48PosAvGCm3oxzb/Gu9PZIGuHLczY+tCnzCkY7MO7E+IWgjXrUAfYwSWz8XmFmA9LLe26DT5jkcK8hGQ== pmikus@cisco.com"
+
+# Nomad settings.
+nomad_certificates:
+  - src: "{{ vault_nomad_v3_ca_file }}"
+    dest: "{{ nomad_ca_file }}"
+  - src: "{{ vault_nomad_v3_cert_file }}"
+    dest: "{{ nomad_cert_file }}"
+  - src: "{{ vault_nomad_v3_key_file }}"
+    dest: "{{ nomad_key_file }}"
+nomad_datacenter: "yul1"
+nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+nomad_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: true
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+  fingerprint.network.disallow_link_local: true
+nomad_retry_servers: [ "10.30.51.30", "10.30.51.32", "10.30.51.33" ]
+nomad_servers: [ "10.30.51.32:4647", "10.30.51.33:4647", "10.30.51.30:4647" ]
+
+# Consul settings.
+consul_nomad_integration: true
+consul_certificates:
+  - src: "{{ vault_consul_v1_ca_file }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ vault_consul_v1_cert_file }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ vault_consul_v1_key_file }}"
+    dest: "{{ consul_key_file }}"
+consul_datacenter: "yul1"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ hostname }}"
+consul_node_role: "client"
+consul_retry_servers:
+  - "10.30.51.30"
+  - "10.30.51.32"
+  - "10.30.51.33"
\ No newline at end of file
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
index d89fe4888f..741da675f1 100644
--- a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
+++ b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts
@@ -61,6 +61,7 @@ all:
 10.32.8.14:  #s46-nomad - skylake
 10.32.8.15:  #s47-nomad - skylake
 10.32.8.16:  #s48-nomad - skylake
+10.32.8.17:  #s57-nomad - skylake
 10.30.51.39: #s53-nomad - thunderx 88xx
 10.30.51.40: #s54-nomad - thunderx 88xx
 10.30.51.65: #s52-nomad - thunderx 88xx
diff --git a/terraform-ci-infra/1n_nmd/.gitignore b/terraform-ci-infra/1n_nmd/.gitignore
new file mode 100644
index 0000000000..8b1a7baa3e
--- /dev/null
+++ b/terraform-ci-infra/1n_nmd/.gitignore
@@ -0,0 +1 @@
+.terraform/
\ No newline at end of file
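The docker.volumes.enabled option added to all four storage hosts above is what allows Docker-driver tasks to bind-mount host paths via their volumes list (the nginx job below mounts its Consul TLS material this way). A minimal sketch of the client stanza these nomad_options are assumed to render into; the exact layout depends on the ansible nomad role:

  # Sketch only: assumed equivalent of the nomad_options map above.
  client {
    enabled = true
    options = {
      "driver.raw_exec.enable"                  = "1"
      "docker.cleanup.image"                    = "true"
      "docker.privileged.enabled"               = "true"
      "docker.volumes.enabled"                  = "true"
      "driver.whitelist"                        = "docker,raw_exec,exec"
      "fingerprint.network.disallow_link_local" = "true"
    }
  }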
diff --git a/terraform-ci-infra/1n_nmd/.terraform.lock.hcl b/terraform-ci-infra/1n_nmd/.terraform.lock.hcl
new file mode 100755
index 0000000000..3a2e4ef85f
--- /dev/null
+++ b/terraform-ci-infra/1n_nmd/.terraform.lock.hcl
@@ -0,0 +1,58 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/nomad" {
+  version     = "1.4.11"
+  constraints = "~> 1.4.9"
+  hashes = [
+    "h1:ElEvgyMfWoWyQbB6c51rGTjQlZKWf3QOvf5NhX/Vuyw=",
+    "zh:150d0ab25241a42f2ac5008878e0106c0887eec15181a40bee1433b87f01b8ed",
+    "zh:1d4ccda0729f12060e7f4ce5c6d83042d6d38ba2e546b68722ccb74832793b0c",
+    "zh:2964652181f59097aa1126f4b215b9232702b1a56df3e017e6b5683d5615714b",
+    "zh:42843e68bca24280e84ec600ae5d8f085fb26bdcdb4c0ccff2139ed81e1cb8c1",
+    "zh:4c6d90d40a360d84bc84c9af35c64056237537fa0f8118bf890fcf4e71f7b0f6",
+    "zh:51771ce42a99d7d4f5a4306367eee4cea72391c07f1f1c55c3c4a5c6a9eca53d",
+    "zh:6ab2389f1be6bb39d4966c253bf4fc77348e90f7e07ed2abb5ec5c90a4bbb615",
+    "zh:9b109254ea7ca6a5b0ede33b406cf5fed779f05672891bbd1cc3255c9cb17663",
+    "zh:a38c929d4fd03193cce94178c0fbaa1f7f09e93223ac71dc77c834d429b1c7c9",
+    "zh:bdc9bc10a1ecb5ae3da651df1709bf9d5474f25e310b73bdf32c86417674d32b",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/template" {
+  version     = "2.1.2"
+  constraints = "~> 2.1.2"
+  hashes = [
+    "h1:8NcPRk3yxQtUlAT/YGfjBEJ76rQI2ljARYeIEjhtWho=",
+    "zh:149e4bf47ac21b67f6567767afcd29caaf0b0ca43714748093a00a2a98cd17a8",
+    "zh:2ff61a5eb7550e0df2baefccea78a8b621faef76154aad7ddf9c85c1d69f7ebf",
+    "zh:3b2d9a9f80754eb0a250a80e0dfdef385501697850a54ead744d1615e60fe648",
+    "zh:545b93c818035aac59f4a821644276c123a74aa210b1221974d832a6009df201",
+    "zh:5508512a522152a302591b399512fa736d8f57088c85ca74f7e00014db3a8c26",
+    "zh:701b56016a6db814ade171877375a2429b45979f97c2d112e4f2103f0433eb08",
+    "zh:90fc08165958538d8a099f17282c615d5b13f86bb215af33e2ca7551bf81996f",
+    "zh:affa6d409060c01a610854a395970d76701d0b07696e1ed6776b3f3b58014104",
+    "zh:b66ffed670bf0ed6714fa4ac26444a8e22f71ec6da134faf0b1f77fb2c13c666",
+    "zh:bb3d87db22f0ac56717eadde39690e3e27c1c01b10d0ecbe2e6e39f1e5c4d808",
+    "zh:c54b9693c9f348591432aabc808cbe1786bcda1cb70d312ef62a24545a14f945",
+    "zh:e7c8f8506cee5fa28f842714857d412a2b09e61127a0efe2a164c2f3d9bf2619",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/vault" {
+  version     = "2.16.0"
+  constraints = ">= 2.14.0"
+  hashes = [
+    "h1:h27r8aZ5nwRfEelTQnJoA8s3TndJYPI7+3Df1DXIhXk=",
+    "zh:13dde74fac618ee0281bad60a60966a85d4a59c8420b15fd6499996fa1bc99b3",
+    "zh:1daad9da6c82f43cbd07bf1cfedf3c6960fb2f96bc59f94fd75d361065b8c51a",
+    "zh:68075d8e1824b745267ce9e4ef693b202b9282561811de6ccf7298935f482128",
+    "zh:86df4a4405413d575cd72985483163e62539afbd659fddef59fc637875b707e2",
+    "zh:8f8306ada4c1c44945ce5205e4f1cfbf5e3d46a9da2f3a1d0be17d32e4935845",
+    "zh:9eb75febcd6fcca9885a6f5e93293a200b2effbe31f47d265cc4d1346d42d29e",
+    "zh:a658b55b239bc7ad59a2bf55e7abbfe5f0111d37dd68b5d4bb947eee93969092",
+    "zh:af10679c241bd0e0168f57c24e839fd24c747f3e84b7bb6de3cd791471709249",
+    "zh:ee3030f36846de45450be088aa4c2b1f69246b2ecf40d7ea6a15a7f09ac5e5d0",
+    "zh:efe6cc23f77336604358e627b0b565c1421a97376e510a9cdaaf849524944713",
+  ]
+}
diff --git a/terraform-ci-infra/1n_nmd/main.tf b/terraform-ci-infra/1n_nmd/main.tf
new file mode 100644
index 0000000000..cb79b416c0
--- /dev/null
+++ b/terraform-ci-infra/1n_nmd/main.tf
@@ -0,0 +1,71 @@
+# For convenience in simple configurations, a child module automatically
+# inherits default (un-aliased) provider configurations from its parent.
+# This means that explicit provider blocks appear only in the root module,
+# and downstream modules can simply declare resources for that provider
+# and have them automatically associated with the root provider
+# configurations.
+module "minio" {
+  source    = "./minio"
+  providers = {
+    nomad = nomad.yul1
+  }
+
+  # nomad
+  nomad_datacenters = [ "yul1" ]
+  nomad_host_volume = "prod-volume-data1-1"
+
+  # minio
+  minio_job_name        = "prod-minio"
+  minio_group_count     = 4
+  minio_service_name    = "storage"
+  minio_host            = "http://10.32.8.1{4...7}"
+  minio_port            = 9000
+  minio_container_image = "minio/minio:RELEASE.2020-12-03T05-49-24Z"
+  minio_vault_secret = {
+    use_vault_provider        = false,
+    vault_kv_policy_name      = "kv-secret",
+    vault_kv_path             = "secret/data/minio",
+    vault_kv_field_access_key = "access_key",
+    vault_kv_field_secret_key = "secret_key"
+  }
+  minio_data_dir        = "/data/"
+  minio_use_host_volume = true
+  minio_use_canary      = true
+  minio_resource_proxy = {
+    cpu    = 200
+    memory = 128
+  }
+  minio_envs = [ "MINIO_BROWSER=\"off\"" ]
+
+  # minio client
+  mc_job_name        = "prod-mc"
+  mc_container_image = "minio/mc:RELEASE.2020-12-10T01-26-17Z"
+  mc_extra_commands = [
+    "mc policy set public LOCALMINIO/logs.fd.io",
+    "mc policy set public LOCALMINIO/docs.fd.io",
+    "mc ilm add --expiry-days \"180\" LOCALMINIO/logs.fd.io",
+    "mc admin user add LOCALMINIO storage Storage1234",
+    "mc admin policy set LOCALMINIO writeonly user=storage"
+  ]
+  minio_buckets = [ "logs.fd.io", "docs.fd.io" ]
+}
+
+module "nginx" {
+  source    = "./nginx"
+  providers = {
+    nomad = nomad.yul1
+  }
+
+  # nomad
+  nomad_datacenters = [ "yul1" ]
+
+  # nginx
+  nginx_job_name = "prod-nginx"
+}
+
+#module "vpp_device" {
+#  source    = "./vpp_device"
+#  providers = {
+#    nomad = nomad.yul1
+#  }
+#}
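The conf/nomad/*.hcl job templates referenced by these modules mix two interpolation layers: ${...} is resolved when Terraform renders the template, $${...} is an escape that leaves a literal ${...} in the jobspec for Nomad to resolve at task start, and %{ if }...%{ endif } are Terraform template directives. A minimal sketch of the convention, using a hypothetical example.hcl:

  # example.hcl (hypothetical) contains:
  #   datacenters = "${datacenters}"                          resolved by Terraform at render time
  #   dns_servers = [ "$${attr.unique.network.ip-address}" ]  escaped; resolved by Nomad at runtime
  #   %{ if use_canary } canary = 1 %{ endif }                Terraform template directive
  data "template_file" "example" {
    template = file("${path.module}/conf/nomad/example.hcl")
    vars = {
      datacenters = "yul1"
      use_canary  = true
    }
  }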
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/minio/conf/nomad/mc.hcl b/terraform-ci-infra/1n_nmd/minio/conf/nomad/mc.hcl new file mode 100644 index 0000000000..238003bb00 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/minio/conf/nomad/mc.hcl @@ -0,0 +1,73 @@ +job "${job_name}" { + # The "region" parameter specifies the region in which to execute the job. + # If omitted, this inherits the default region name of "global". + # region = "global" + # + # The "datacenters" parameter specifies the list of datacenters which should + # be considered when placing this task. This must be provided. + datacenters = "${datacenters}" + + # The "type" parameter controls the type of job, which impacts the scheduler's + # decision on placement. This configuration is optional and defaults to + # "service". For a full list of job types and their differences, please see + # the online documentation. + # + # For more information, please see the online documentation at: + # + # https://www.nomadproject.io/docs/jobspec/schedulers.html + # + type = "batch" + + # The "group" stanza defines a series of tasks that should be co-located on + # the same Nomad client. Any task within a group will be placed on the same + # client. + # + # For more information and examples on the "group" stanza, please see + # the online documentation at: + # + # https://www.nomadproject.io/docs/job-specification/group.html + # + group "prod-group1-mc" { + task "prod-task1-create-buckets" { + # The "driver" parameter specifies the task driver that should be used to + # run the task. + driver = "docker" + + %{ if use_vault_provider } + vault { + policies = "${vault_kv_policy_name}" + } + %{ endif } + + # The "config" stanza specifies the driver configuration, which is passed + # directly to the driver to start the task. The details of configurations + # are specific to each driver, so please see specific driver + # documentation for more information. + config { + image = "${image}" + entrypoint = [ + "/bin/sh", + "-c", + "${command}" + ] + dns_servers = [ "$${attr.unique.network.ip-address}" ] + privileged = false + } + + # The env stanza configures a list of environment variables to populate + # the task's environment before starting. + env { + %{ if use_vault_provider } + {{ with secret "${vault_kv_path}" }} + MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}" + MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}" + {{ end }} + %{ else } + MINIO_ACCESS_KEY = "${access_key}" + MINIO_SECRET_KEY = "${secret_key}" + %{ endif } + ${ envs } + } + } + } +} diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad b/terraform-ci-infra/1n_nmd/minio/conf/nomad/minio.hcl index 4e8f7ecb98..a2df44f666 100644 --- a/resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad +++ b/terraform-ci-infra/1n_nmd/minio/conf/nomad/minio.hcl @@ -1,11 +1,11 @@ -job "prod-storage" { +job "${job_name}" { # The "region" parameter specifies the region in which to execute the job. # If omitted, this inherits the default region name of "global". # region = "global" # # The "datacenters" parameter specifies the list of datacenters which should # be considered when placing this task. This must be provided. - datacenters = [ "yul1" ] + datacenters = "${datacenters}" # The "type" parameter controls the type of job, which impacts the scheduler's # decision on placement. 
This configuration is optional and defaults to @@ -16,25 +16,27 @@ job "prod-storage" { # # https://www.nomadproject.io/docs/jobspec/schedulers.html # - type = "service" + type = "service" update { # The "max_parallel" parameter specifies the maximum number of updates to # perform in parallel. In this case, this specifies to update a single task # at a time. - max_parallel = 0 + max_parallel = 1 + + health_check = "checks" # The "min_healthy_time" parameter specifies the minimum time the allocation # must be in the healthy state before it is marked as healthy and unblocks # further allocations from being updated. - min_healthy_time = "10s" + min_healthy_time = "10s" # The "healthy_deadline" parameter specifies the deadline in which the # allocation must be marked as healthy after which the allocation is # automatically transitioned to unhealthy. Transitioning to unhealthy will # fail the deployment and potentially roll back the job if "auto_revert" is # set to true. - healthy_deadline = "3m" + healthy_deadline = "3m" # The "progress_deadline" parameter specifies the deadline in which an # allocation must be marked as healthy. The deadline begins when the first @@ -44,11 +46,7 @@ job "prod-storage" { # deployment is marked as failed. progress_deadline = "10m" - # The "auto_revert" parameter specifies if the job should auto-revert to the - # last stable job on deployment failure. A job is marked as stable if all the - # allocations as part of its deployment were marked healthy. - auto_revert = false - +%{ if use_canary } # The "canary" parameter specifies that changes to the job that would result # in destructive updates should create the specified number of canaries # without stopping any previous allocations. Once the operator determines the @@ -58,15 +56,28 @@ job "prod-storage" { # Further, setting "canary" equal to the count of the task group allows # blue/green deployments. When the job is updated, a full set of the new # version is deployed and upon promotion the old version is stopped. - canary = 0 + canary = 1 + + # Specifies if the job should auto-promote to the canary version when all + # canaries become healthy during a deployment. Defaults to false which means + # canaries must be manually updated with the nomad deployment promote + # command. + auto_promote = true + + # The "auto_revert" parameter specifies if the job should auto-revert to the + # last stable job on deployment failure. A job is marked as stable if all the + # allocations as part of its deployment were marked healthy. + auto_revert = true +%{ endif } } # All groups in this job should be scheduled on different hosts. constraint { - operator = "distinct_hosts" - value = "true" + operator = "distinct_hosts" + value = "true" } + # The "group" stanza defines a series of tasks that should be co-located on # the same Nomad client. Any task within a group will be placed on the same # client. # @@ -75,25 +86,20 @@ job "prod-storage" { # # https://www.nomadproject.io/docs/job-specification/group.html # - group "prod-group1-storage" { + group "prod-group1-minio" { # The "count" parameter specifies the number of the task groups that should # be running under this group. This value must be non-negative and defaults # to 1. - count = 2 - - # Hard coding prefered node as primary. 
- affinity { - attribute = "${attr.unique.hostname}" - value = "s46-nomad" - weight = 100 - } + count = ${group_count} # https://www.nomadproject.io/docs/job-specification/volume - volume "prod-volume1-storage" { + %{ if use_host_volume } + volume "prod-volume1-minio" { type = "host" read_only = false - source = "prod-volume-data1-1" + source = "${host_volume}" } + %{ endif } # The "task" stanza creates an individual unit of work, such as a Docker # container, web application, or batch processing. @@ -103,36 +109,54 @@ job "prod-storage" { # # https://www.nomadproject.io/docs/job-specification/task.html # - task "prod-task1-storage" { + task "prod-task1-minio" { # The "driver" parameter specifies the task driver that should be used to # run the task. - driver = "docker" + driver = "docker" + %{ if use_host_volume } volume_mount { - volume = "prod-volume1-storage" - destination = "/data/" - read_only = false + volume = "prod-volume1-minio" + destination = "${data_dir}" + read_only = false + } + %{ endif } + + %{ if use_vault_provider } + vault { + policies = "${vault_kv_policy_name}" } + %{ endif } # The "config" stanza specifies the driver configuration, which is passed # directly to the driver to start the task. The details of configurations # are specific to each driver, so please see specific driver # documentation for more information. config { - image = "minio/minio:RELEASE.2020-11-19T23-48-16Z" - dns_servers = [ "${attr.unique.network.ip-address}" ] - command = "server" - args = [ "/data/" ] + image = "${image}" + dns_servers = [ "$${attr.unique.network.ip-address}" ] + network_mode = "host" + command = "server" + args = [ "${host}:${port}${data_dir}" ] port_map { - http = 9000 + http = ${port} } - privileged = false + privileged = false } + # The env stanza configures a list of environment variables to populate + # the task's environment before starting. env { - MINIO_ACCESS_KEY = "minio" - MINIO_SECRET_KEY = "minio123" - MINIO_BROWSER = "off" +%{ if use_vault_provider } +{{ with secret "${vault_kv_path}" }} + MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}" + MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}" +{{ end }} +%{ else } + MINIO_ACCESS_KEY = "${access_key}" + MINIO_SECRET_KEY = "${secret_key}" +%{ endif } + ${ envs } } # The service stanza instructs Nomad to register a service with Consul. @@ -143,11 +167,11 @@ job "prod-storage" { # https://www.nomadproject.io/docs/job-specification/service.html # service { - name = "storage" + name = "${service_name}" port = "http" - tags = [ "storage${NOMAD_ALLOC_INDEX}" ] + tags = [ "storage$${NOMAD_ALLOC_INDEX}" ] check { - name = "alive" + name = "Min.io Server HTTP Check Live" type = "http" port = "http" protocol = "http" @@ -155,7 +179,16 @@ job "prod-storage" { path = "/minio/health/live" interval = "10s" timeout = "2s" - task = "${TASK}" + } + check { + name = "Min.io Server HTTP Check Ready" + type = "http" + port = "http" + protocol = "http" + method = "GET" + path = "/minio/health/ready" + interval = "10s" + timeout = "2s" } } @@ -170,87 +203,26 @@ job "prod-storage" { # https://www.nomadproject.io/docs/job-specification/resources.html # resources { - cpu = 2000 - memory = 2048 + cpu = ${cpu} + memory = ${memory} + # The network stanza specifies the networking requirements for the task + # group, including the network mode and port allocations. When scheduling + # jobs in Nomad they are provisioned across your fleet of machines along + # with other jobs and services. 
Because you don't know in advance what host + # your job will be provisioned on, Nomad will provide your tasks with + # network configuration when they start up. + # + # For more information and examples on the "template" stanza, please see + # the online documentation at: + # + # https://www.nomadproject.io/docs/job-specification/network.html + # network { port "http" { - static = 9000 + static = ${port} } } } } - - task "prod-task2-sync" { - # The "raw_exec" parameter specifies the task driver that should be used - # to run the task. - driver = "raw_exec" - - # The "template" stanza instructs Nomad to manage a template, such as - # a configuration file or script. This template can optionally pull data - # from Consul or Vault to populate runtime configuration data. - # - # For more information and examples on the "template" stanza, please see - # the online documentation at: - # - # https://www.nomadproject.io/docs/job-specification/template.html - # - template { - data = <<EOH -#!/bin/bash - -INOTIFY_OPTONS="--recursive --monitor" -VOLUMES="/data/logs.fd.io /data/docs.fd.io" - -if [ '{{ env "attr.unique.network.ip-address" }}' = "10.32.8.14" ]; then -echo "Running notify daemon" - inotifywait -e moved_to ${INOTIFY_OPTONS} ${VOLUMES} | \ - while read path action file; do - key="testuser" - secret="Csit1234" - - resource=${path#"/data"}${file} - date=$(date -R) - _signature="PUT\n\napplication/octet-stream\n${date}\n${resource}" - signature=$(echo -en ${_signature} | openssl sha1 -hmac ${secret} -binary | base64) - - curl -v -X PUT -T "${path}${file}" \ - -H "Host: storage0.storage.service.consul:9000" \ - -H "Date: ${date}" \ - -H "Content-Type: application/octet-stream" \ - -H "Authorization: AWS ${key}:${signature}" \ - http://storage0.storage.service.consul:9000${resource} - done -else - while :; do sleep 2073600; done -fi - -EOH - destination = "local/sync.sh" - perms = "755" - } - - # The "config" stanza specifies the driver configuration, which is passed - # directly to the driver to start the task. The details of configurations - # are specific to each driver, so please see specific driver - # documentation for more information. - config { - command = "local/sync.sh" - } - - # The "resources" stanza describes the requirements a task needs to - # execute. Resource requirements include memory, network, cpu, and more. - # This ensures the task will execute on a machine that contains enough - # resource capacity. - # - # For more information and examples on the "resources" stanza, please see - # the online documentation at: - # - # https://www.nomadproject.io/docs/job-specification/resources.html - # - resources { - cpu = 500 - memory = 256 - } - } } -}
\ No newline at end of file +} diff --git a/terraform-ci-infra/1n_nmd/minio/main.tf b/terraform-ci-infra/1n_nmd/minio/main.tf new file mode 100644 index 0000000000..6877211e6a --- /dev/null +++ b/terraform-ci-infra/1n_nmd/minio/main.tf @@ -0,0 +1,82 @@ +locals { + datacenters = join(",", var.nomad_datacenters) + minio_env_vars = join("\n", + concat([ + ], var.minio_envs) + ) + mc_env_vars = join("\n", + concat([ + ], var.mc_envs) + ) + mc_formatted_bucket_list = formatlist("LOCALMINIO/%s", var.minio_buckets) + mc_add_config_command = concat( + [ + "mc", + "config", + "host", + "add", + "LOCALMINIO", + "http://${var.minio_service_name}.service.consul:${var.minio_port}", + "$MINIO_ACCESS_KEY", + "$MINIO_SECRET_KEY", + ]) + mc_create_bucket_command = concat(["mc", "mb", "-p"], local.mc_formatted_bucket_list) + command = join(" ", concat(local.mc_add_config_command, ["&&"], local.mc_create_bucket_command, [";"], concat(var.mc_extra_commands))) +} + +data "template_file" "nomad_job_minio" { + template = file("${path.module}/conf/nomad/minio.hcl") + vars = { + job_name = var.minio_job_name + datacenters = local.datacenters + use_canary = var.minio_use_canary + group_count = var.minio_group_count + use_host_volume = var.minio_use_host_volume + host_volume = var.nomad_host_volume + service_name = var.minio_service_name + host = var.minio_host + port = var.minio_port + upstreams = jsonencode(var.minio_upstreams) + cpu_proxy = var.minio_resource_proxy.cpu + memory_proxy = var.minio_resource_proxy.memory + use_vault_provider = var.minio_vault_secret.use_vault_provider + image = var.minio_container_image + access_key = var.minio_access_key + secret_key = var.minio_secret_key + data_dir = var.minio_data_dir + envs = local.minio_env_vars + cpu = var.minio_cpu + memory = var.minio_memory + } +} + +data "template_file" "nomad_job_mc" { + template = file("${path.module}/conf/nomad/mc.hcl") + vars = { + job_name = var.mc_job_name + service_name = var.mc_service_name + datacenters = local.datacenters + minio_service_name = var.minio_service_name + minio_port = var.minio_port + image = var.mc_container_image + access_key = var.minio_access_key + secret_key = var.minio_secret_key + use_vault_provider = var.minio_vault_secret.use_vault_provider + envs = local.mc_env_vars + command = local.command + } +} + +resource "nomad_job" "nomad_job_minio" { + jobspec = data.template_file.nomad_job_minio.rendered + detach = false +} + +resource "nomad_job" "nomad_job_mc" { + jobspec = data.template_file.nomad_job_mc.rendered + detach = false + + depends_on = [ + nomad_job.nomad_job_minio + ] +}
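In minio/main.tf above, nomad_job.nomad_job_mc combines depends_on with detach = false so the bucket-creating batch job is only registered after the MinIO service job has been submitted and its deployment has finished. Rendering still goes through the hashicorp/template provider, which is archived upstream; on Terraform 0.12+ the built-in templatefile() function does the same job without an extra provider. A hedged sketch, not part of this change:

  resource "nomad_job" "nomad_job_minio" {
    # templatefile() renders conf/nomad/minio.hcl with the same vars map
    # that is passed to data.template_file.nomad_job_minio above.
    jobspec = templatefile("${path.module}/conf/nomad/minio.hcl", {
      job_name    = var.minio_job_name
      datacenters = local.datacenters
      # ...remaining keys unchanged
    })
    detach = false
  }

Note also that local.command joins the mc_extra_commands list with single spaces, so consecutive commands run as one command line unless the caller embeds "&&" or ";" separators itself.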
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/minio/outputs.tf b/terraform-ci-infra/1n_nmd/minio/outputs.tf new file mode 100644 index 0000000000..c3d7f15af6 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/minio/outputs.tf @@ -0,0 +1,21 @@ +output "minio_service_name" { + description = "Minio service name" + value = data.template_file.nomad_job_minio.vars.service_name +} + +output "minio_access_key" { + description = "Minio access key" + value = data.template_file.nomad_job_minio.vars.access_key + sensitive = true +} + +output "minio_secret_key" { + description = "Minio secret key" + value = data.template_file.nomad_job_minio.vars.secret_key + sensitive = true +} + +output "minio_port" { + description = "Minio port number" + value = data.template_file.nomad_job_minio.vars.port +} diff --git a/terraform-ci-infra/1n_nmd/minio/variables.tf b/terraform-ci-infra/1n_nmd/minio/variables.tf new file mode 100644 index 0000000000..01f94fcd75 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/minio/variables.tf @@ -0,0 +1,170 @@ +# Nomad +variable "nomad_datacenters" { + description = "Nomad data centers" + type = list(string) + default = [ "dc1" ] +} + +variable "nomad_host_volume" { + description = "Nomad Host Volume" + type = string + default = "persistence" +} + +# Minio +variable "minio_job_name" { + description = "Minio job name" + type = string + default = "minio" +} + +variable "minio_service_name" { + description = "Minio service name" + type = string + default = "minio" +} + +variable "minio_group_count" { + description = "Number of Minio group instances" + type = number + default = 1 +} + +variable "minio_host" { + description = "Minio host" + type = string + default = "127.0.0.1" +} + +variable "minio_port" { + description = "Minio port" + type = number + default = 9000 +} + +variable "minio_cpu" { + description = "CPU allocation for Minio" + type = number + default = 200 +} + +variable "minio_memory" { + description = "Memory allocation for Minio" + type = number + default = 1024 +} + +variable "minio_container_image" { + description = "Minio docker image" + type = string + default = "minio/minio:latest" +} + +variable "minio_envs" { + description = "Minio environment variables" + type = list(string) + default = [] +} + +variable "minio_access_key" { + description = "Minio access key" + type = string + default = "minio" +} + +variable "minio_secret_key" { + description = "Minio secret key" + type = string + default = "minio123" +} + +variable "minio_data_dir" { + description = "Minio server data dir" + type = string + default = "/data/" +} + +variable "minio_use_host_volume" { + description = "Use Nomad host volume feature" + type = bool + default = false +} + +variable "minio_use_canary" { + description = "Uses canary deployment for Minio" + type = bool + default = false +} + +variable "minio_vault_secret" { + description = "Set of properties to be able to fetch secret from vault" + type = object({ + use_vault_provider = bool, + vault_kv_policy_name = string, + vault_kv_path = string, + vault_kv_field_access_key = string, + vault_kv_field_secret_key = string + }) +} + +variable "minio_resource_proxy" { + description = "Minio proxy resources" + type = object({ + cpu = number, + memory = number + }) + default = { + cpu = 200, + memory = 128 + } + validation { + condition = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128 + error_message = "Proxy resource must be at least: cpu=200, memory=128." 
+ } +} + +# MC +variable "mc_job_name" { + description = "Minio client job name" + type = string + default = "mc" +} + +variable "mc_service_name" { + description = "Minio client service name" + type = string + default = "mc" +} + +variable "mc_container_image" { + description = "Minio client docker image" + type = string + default = "minio/mc:latest" +} + +variable "mc_envs" { + description = "Minio client environment variables" + type = list(string) + default = [] +} + +variable "minio_buckets" { + description = "List of buckets to create on startup" + type = list(string) + default = [] +} + +variable "minio_upstreams" { + description = "List of upstream services (list of object with service_name, port)" + type = list(object({ + service_name = string, + port = number, + })) + default = [] +} + +variable "mc_extra_commands" { + description = "Extra commands to run in MC container after creating buckets" + type = list(string) + default = [""] +}
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/minio/versions.tf b/terraform-ci-infra/1n_nmd/minio/versions.tf new file mode 100644 index 0000000000..960bd4bba6 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/minio/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + nomad = { + source = "hashicorp/nomad" + version = "~> 1.4.9" + } + template = { + source = "hashicorp/template" + version = "~> 2.1.2" + } + } + required_version = ">= 0.13" +} diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad b/terraform-ci-infra/1n_nmd/nginx/conf/nomad/nginx.hcl index 2af62a06c3..9cb5f8ef45 100644 --- a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad +++ b/terraform-ci-infra/1n_nmd/nginx/conf/nomad/nginx.hcl @@ -1,11 +1,11 @@ -job "prod-nginx" { +job "${job_name}" { # The "region" parameter specifies the region in which to execute the job. # If omitted, this inherits the default region name of "global". # region = "global" # # The "datacenters" parameter specifies the list of datacenters which should # be considered when placing this task. This must be provided. - datacenters = [ "yul1" ] + datacenters = "${datacenters}" # The "type" parameter controls the type of job, which impacts the scheduler's # decision on placement. This configuration is optional and defaults to @@ -72,7 +72,6 @@ job "prod-nginx" { unlimited = true } - # The "group" stanza defines a series of tasks that should be co-located on # the same Nomad client. Any task within a group will be placed on the same # client. @@ -97,32 +96,6 @@ job "prod-nginx" { mode = "fail" } - # All groups in this job should be scheduled on different hosts. - constraint { - operator = "distinct_hosts" - value = "false" - } - - # Prioritize one node. - affinity { - attribute = "${attr.unique.hostname}" - value = "s46-nomad" - weight = 100 - } - - # The volume stanza allows the group to specify that it requires a given - # volume from the cluster. - # - # For more information and examples on the "volume" stanza, please see - # the online documentation at: - # - # https://www.nomadproject.io/docs/job-specification/volume - volume "prod-volume1-storage" { - type = "host" - read_only = false - source = "prod-volume-data1-1" - } - # The "task" stanza creates an individual unit of work, such as a Docker # container, web application, or batch processing. # @@ -136,19 +109,13 @@ job "prod-nginx" { # run the task. driver = "docker" - volume_mount { - volume = "prod-volume1-storage" - destination = "/data/" - read_only = true - } - # The "config" stanza specifies the driver configuration, which is passed # directly to the driver to start the task. The details of configurations # are specific to each driver, so please see specific driver # documentation for more information. 
config { image = "nginx:stable" - dns_servers = [ "${attr.unique.network.ip-address}" ] + dns_servers = [ "$${attr.unique.network.ip-address}" ] port_map { https = 443 } @@ -156,6 +123,7 @@ job "prod-nginx" { volumes = [ "/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem", "/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem", + "custom/upstream.conf:/etc/nginx/conf.d/upstream.conf", "custom/logs.conf:/etc/nginx/conf.d/logs.conf", "custom/docs.conf:/etc/nginx/conf.d/docs.conf" ] @@ -172,6 +140,17 @@ job "prod-nginx" { # template { data = <<EOH + upstream storage { + server storage0.storage.service.consul:9000; + server storage1.storage.service.consul:9000; + server storage2.storage.service.consul:9000; + server storage3.storage.service.consul:9000; + } + EOH + destination = "custom/upstream.conf" + } + template { + data = <<EOH server { listen 443 ssl default_server; server_name logs.nginx.service.consul; @@ -184,29 +163,48 @@ job "prod-nginx" { ssl_certificate /etc/ssl/certs/nginx-cert.pem; ssl_certificate_key /etc/ssl/private/nginx-key.pem; location / { - root /data/logs.fd.io; - index _; - autoindex on; - autoindex_exact_size on; - autoindex_format html; - autoindex_localtime off; + chunked_transfer_encoding off; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Host $host:$server_port; + proxy_set_header Connection ""; + proxy_pass http://storage/logs.fd.io/; + server_name_in_redirect off; } - location ~ \.(html.gz)$ { - root /data/logs.fd.io; + location ~ (.*html.gz)$ { add_header Content-Encoding gzip; add_header Content-Type text/html; + chunked_transfer_encoding off; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Host $host:$server_port; + proxy_set_header Connection ""; + proxy_pass http://storage/logs.fd.io/$1; + server_name_in_redirect off; } - location ~ \.(txt.gz|log.gz)$ { - root /data/logs.fd.io; + location ~ (.*txt.gz|.*log.gz)$ { add_header Content-Encoding gzip; add_header Content-Type text/plain; + chunked_transfer_encoding off; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Host $host:$server_port; + proxy_set_header Connection ""; + proxy_pass http://storage/logs.fd.io/$1; + server_name_in_redirect off; } - location ~ \.(xml.gz)$ { - root /data/logs.fd.io; + location ~ (.*xml.gz)$ { add_header Content-Encoding gzip; add_header Content-Type application/xml; + chunked_transfer_encoding off; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Host $host:$server_port; + proxy_set_header Connection ""; + proxy_pass http://storage/logs.fd.io/$1; + server_name_in_redirect off; } - } + } EOH destination = "custom/logs.conf" } @@ -215,17 +213,22 @@ job "prod-nginx" { server { listen 443 ssl; server_name docs.nginx.service.consul; - keepalive_timeout 70; - ssl_session_cache shared:SSL:10m; - ssl_session_timeout 10m; - ssl_protocols TLSv1.2; - ssl_prefer_server_ciphers on; - ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384"; + keepalive_timeout 70; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + ssl_protocols TLSv1.2; + ssl_prefer_server_ciphers on; + ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384"; ssl_certificate /etc/ssl/certs/nginx-cert.pem; ssl_certificate_key 
/etc/ssl/private/nginx-key.pem; location / { - root /data/docs.fd.io; - index index.html index.htm; + chunked_transfer_encoding off; + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Host $host:$server_port; + proxy_set_header Connection ""; + proxy_pass http://storage/docs.fd.io/; + server_name_in_redirect off; } } EOH diff --git a/terraform-ci-infra/1n_nmd/nginx/main.tf b/terraform-ci-infra/1n_nmd/nginx/main.tf new file mode 100644 index 0000000000..96696922ff --- /dev/null +++ b/terraform-ci-infra/1n_nmd/nginx/main.tf @@ -0,0 +1,16 @@ +locals { + datacenters = join(",", var.nomad_datacenters) +} + +data "template_file" "nomad_job_nginx" { + template = file("${path.module}/conf/nomad/nginx.hcl") + vars = { + job_name = var.nginx_job_name + datacenters = local.datacenters + } +} + +resource "nomad_job" "nomad_job_nginx" { + jobspec = data.template_file.nomad_job_nginx.rendered + detach = false +}
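The upstream.conf template above pins the four storageN.storage.service.consul names that the minio job registers through its per-allocation tags (storage0..storage3). A hedged alternative sketch that relies on the untagged Consul service name instead, assuming it is acceptable for nginx to resolve the round-robin A records once at startup:

  # Sketch only: one Consul service name instead of four pinned hosts.
  template {
    data        = <<EOH
  upstream storage {
    server storage.service.consul:9000;
  }
EOH
    destination = "custom/upstream.conf"
  }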
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/nginx/variables.tf b/terraform-ci-infra/1n_nmd/nginx/variables.tf new file mode 100644 index 0000000000..1a1e45f89f --- /dev/null +++ b/terraform-ci-infra/1n_nmd/nginx/variables.tf @@ -0,0 +1,13 @@ +# Nomad +variable "nomad_datacenters" { + description = "Nomad data centers" + type = list(string) + default = [ "dc1" ] +} + +# Nginx +variable "nginx_job_name" { + description = "Nginx job name" + type = string + default = "nginx" +}
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/nginx/versions.tf b/terraform-ci-infra/1n_nmd/nginx/versions.tf new file mode 100644 index 0000000000..960bd4bba6 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/nginx/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + nomad = { + source = "hashicorp/nomad" + version = "~> 1.4.9" + } + template = { + source = "hashicorp/template" + version = "~> 2.1.2" + } + } + required_version = ">= 0.13" +} diff --git a/terraform-ci-infra/1n_nmd/providers.tf b/terraform-ci-infra/1n_nmd/providers.tf new file mode 100644 index 0000000000..c7e0ad8bf3 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/providers.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + nomad = { + source = "hashicorp/nomad" + version = "~> 1.4.9" + } + template = { + source = "hashicorp/template" + version = "~> 2.1.2" + } + vault = { + version = ">=2.14.0" + } + } + required_version = ">= 0.13" +} + +provider "nomad" { + address = var.nomad_provider_address + alias = "yul1" +}
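providers.tf above references var.nomad_provider_address, whose declaration is not part of this diff. A minimal sketch of the declaration the root module is assumed to carry (the default shown is an assumption, not taken from this change):

  variable "nomad_provider_address" {
    # Assumed default; adjust to the actual Nomad API endpoint.
    description = "Nomad cluster address."
    type        = string
    default     = "https://nomad.service.consul:4646"
  }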
\ No newline at end of file diff --git a/terraform-ci-infra/1n_nmd/terraform.tfstate b/terraform-ci-infra/1n_nmd/terraform.tfstate new file mode 100644 index 0000000000..e9945334db --- /dev/null +++ b/terraform-ci-infra/1n_nmd/terraform.tfstate @@ -0,0 +1,309 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 217, + "lineage": "e4e7f30a-652d-7a31-e31c-5e3a3388c9b9", + "outputs": {}, + "resources": [ + { + "module": "module.minio", + "mode": "data", + "type": "template_file", + "name": "nomad_job_mc", + "provider": "provider[\"registry.terraform.io/hashicorp/template\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "94812595996d00fbe02cda0864eb70690405e0c46f721b864202411459a016b0", + "rendered": "job \"prod-mc\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/mc:RELEASE.2020-12-10T01-26-17Z\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io mc admin user add LOCALMINIO storage Storage1234\"\n ]\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n \n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n \n \n }\n }\n }\n}\n", + "template": "job \"${job_name}\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. 
This must be provided.\n datacenters = \"${datacenters}\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n %{ if use_vault_provider }\n vault {\n policies = \"${vault_kv_policy_name}\"\n }\n %{ endif }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"${image}\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"${command}\"\n ]\n dns_servers = [ \"$${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n %{ if use_vault_provider }\n {{ with secret \"${vault_kv_path}\" }}\n MINIO_ACCESS_KEY = \"{{ .Data.data.${vault_kv_field_access_key} }}\"\n MINIO_SECRET_KEY = \"{{ .Data.data.${vault_kv_field_secret_key} }}\"\n {{ end }}\n %{ else }\n MINIO_ACCESS_KEY = \"${access_key}\"\n MINIO_SECRET_KEY = \"${secret_key}\"\n %{ endif }\n ${ envs }\n }\n }\n }\n}\n", + "vars": { + "access_key": "minio", + "command": "mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io mc admin user add LOCALMINIO storage Storage1234", + "datacenters": "yul1", + "envs": "", + "image": "minio/mc:RELEASE.2020-12-10T01-26-17Z", + "job_name": "prod-mc", + "minio_port": "9000", + "minio_service_name": "storage", + "secret_key": "minio123", + "service_name": "mc", + "use_vault_provider": "false" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.minio", + "mode": "data", + "type": "template_file", + "name": "nomad_job_minio", + "provider": "provider[\"registry.terraform.io/hashicorp/template\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "7316d70d802832981e08dc3c9b6b9637d8998e7cf173786b341f788dc513512b", + "rendered": "job \"prod-minio\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. 
This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. 
This value must be non-negative and defaults\n # to 1.\n count = 4\n\n # https://www.nomadproject.io/docs/job-specification/volume\n \n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n \n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"/data/\"\n read_only = false\n }\n \n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/minio:RELEASE.2020-12-03T05-49-24Z\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"http://10.32.8.1{4...7}:9000/data/\" ]\n port_map {\n http = 9000\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n\n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n\n MINIO_BROWSER=\"off\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"storage\"\n port = \"http\"\n tags = [ \"storage${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 200\n memory = 1024\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. 
Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = 9000\n }\n }\n }\n }\n }\n}\n", + "template": "job \"${job_name}\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"${datacenters}\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n%{ if use_canary }\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. 
A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n%{ endif }\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = ${group_count}\n\n # https://www.nomadproject.io/docs/job-specification/volume\n %{ if use_host_volume }\n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"${host_volume}\"\n }\n %{ endif }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n %{ if use_host_volume }\n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"${data_dir}\"\n read_only = false\n }\n %{ endif }\n\n %{ if use_vault_provider }\n vault {\n policies = \"${vault_kv_policy_name}\"\n }\n %{ endif }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"${image}\"\n dns_servers = [ \"$${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"${host}:${port}${data_dir}\" ]\n port_map {\n http = ${port}\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n%{ if use_vault_provider }\n{{ with secret \"${vault_kv_path}\" }}\n MINIO_ACCESS_KEY = \"{{ .Data.data.${vault_kv_field_access_key} }}\"\n MINIO_SECRET_KEY = \"{{ .Data.data.${vault_kv_field_secret_key} }}\"\n{{ end }}\n%{ else }\n MINIO_ACCESS_KEY = \"${access_key}\"\n MINIO_SECRET_KEY = \"${secret_key}\"\n%{ endif }\n ${ envs }\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"${service_name}\"\n port = \"http\"\n tags = [ \"storage$${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = ${cpu}\n memory = ${memory}\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. 
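Each MinIO allocation registers itself in Consul as the "storage" service, tagged storage0 through storage3 via NOMAD_ALLOC_INDEX, with liveness and readiness checks against MinIO's built-in health endpoints. A trimmed fragment of the pattern as it appears in the jobspec:

  service {
    name = "storage"
    port = "http"
    # Yields tags storage0..storage3; Consul then resolves names like
    # storage0.storage.service.consul for the nginx upstream further down.
    tags = [ "storage${NOMAD_ALLOC_INDEX}" ]
    check {
      type     = "http"
      path     = "/minio/health/live"   # liveness; /minio/health/ready for readiness
      interval = "10s"
      timeout  = "2s"
    }
  }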
Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = ${port}\n }\n }\n }\n }\n }\n}\n", + "vars": { + "access_key": "minio", + "cpu": "200", + "cpu_proxy": "200", + "data_dir": "/data/", + "datacenters": "yul1", + "envs": "MINIO_BROWSER=\"off\"", + "group_count": "4", + "host": "http://10.32.8.1{4...7}", + "host_volume": "prod-volume-data1-1", + "image": "minio/minio:RELEASE.2020-12-03T05-49-24Z", + "job_name": "prod-minio", + "memory": "1024", + "memory_proxy": "128", + "port": "9000", + "secret_key": "minio123", + "service_name": "storage", + "upstreams": "[]", + "use_canary": "true", + "use_host_volume": "true", + "use_vault_provider": "false" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.minio", + "mode": "managed", + "type": "nomad_job", + "name": "nomad_job_mc", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "a2d75168-4719-f98e-e906-3f04173b986c" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": "", + "deployment_status": "", + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": false, + "id": "prod-mc", + "jobspec": "job \"prod-mc\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
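The "template" attribute above shows the three interpolation layers in play: ${...} is filled in by Terraform's template provider, %{ if ... }/%{ endif } are Terraform template directives, and $${...} escapes to a literal ${...} so that Nomad resolves it at runtime, hence $${attr.unique.network.ip-address} in the template versus the concrete address in the rendered output. The module uses the template_file data source; Terraform's built-in templatefile() function applies the same rules, sketched here with a hypothetical file path:

  locals {
    # minio.hcl.tpl would contain, for example:
    #   dns_servers = [ "$${attr.unique.network.ip-address}" ]  # Nomad-time
    #   port_map { http = ${port} }                             # Terraform-time
    #   %{ if use_host_volume } volume "v" { ... } %{ endif }   # directive
    rendered = templatefile("${path.module}/minio.hcl.tpl", {
      port            = 9000
      use_host_volume = true
    })
  }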
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/mc:RELEASE.2020-12-10T01-26-17Z\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io mc admin user add LOCALMINIO storage Storage1234\"\n ]\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n \n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n \n \n }\n }\n }\n}\n", + "json": null, + "modify_index": "5890754", + "name": "prod-mc", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "prod-group1-mc", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-create-buckets", + "volume_mounts": null + } + ], + "volumes": null + } + ], + "type": "batch" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.minio.data.template_file.nomad_job_mc", + "module.minio.data.template_file.nomad_job_minio", + "module.minio.nomad_job.nomad_job_minio" + ] + } + ] + }, + { + "module": "module.minio", + "mode": "managed", + "type": "nomad_job", + "name": "nomad_job_minio", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "d0f0c90c-06ac-a636-ce44-60dcc1a58229", + "f01d4aec-65ca-ea7d-60f2-0a9457bb2449", + "b7a92de2-22b4-a52b-1de3-33a1dcf6b69b", + "bbb736b1-94a3-0cd1-d776-aaceb0ed9d7f", + "885b8e38-cb0e-93ba-cb68-da9e2d7bbeb7", + "ac621157-f496-d868-8396-c2a463007bb6", + "0ead2172-72ee-c52e-abd2-d9a6f100d27c", + "476369b7-d15d-19ec-25a7-2f46da76eeb6" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": "d5b25946-c510-1017-3a50-f83c3616f718", + "deployment_status": "successful", + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": false, + "id": "prod-minio", + "jobspec": "job \"prod-minio\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. 
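The prod-mc batch job provisions the buckets by chaining mc commands through a /bin/sh -c entrypoint. Note that in the recorded string everything after the first ";" runs together with no "&&" or ";" between the mc policy set and mc admin user add invocations, so the later commands would be parsed as extra arguments to the first mc policy set. A corrected sketch of the intended chain (not the recorded state):

  config {
    image      = "minio/mc:RELEASE.2020-12-10T01-26-17Z"
    entrypoint = [
      "/bin/sh", "-c",
      "mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY && mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io && mc policy set public LOCALMINIO/logs.fd.io && mc policy set public LOCALMINIO/docs.fd.io && mc admin user add LOCALMINIO storage Storage1234"
    ]
  }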
In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. 
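The update stanza recorded here implements a canary-gated rolling deploy: one canary allocation per group, health judged by the Consul checks rather than bare task state, automatic promotion once healthy, automatic revert otherwise. The essential parameters, condensed:

  update {
    max_parallel      = 1
    health_check      = "checks"  # gate on Consul health checks
    min_healthy_time  = "10s"
    healthy_deadline  = "3m"
    progress_deadline = "10m"
    canary            = 1         # new version runs alongside the old
    auto_promote      = true      # promote once the canary is healthy
    auto_revert       = true      # fall back to last stable job on failure
  }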
This value must be non-negative and defaults\n # to 1.\n count = 4\n\n # https://www.nomadproject.io/docs/job-specification/volume\n \n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n \n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"/data/\"\n read_only = false\n }\n \n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/minio:RELEASE.2020-12-03T05-49-24Z\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"http://10.32.8.1{4...7}:9000/data/\" ]\n port_map {\n http = 9000\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n\n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n\n MINIO_BROWSER=\"off\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"storage\"\n port = \"http\"\n tags = [ \"storage${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 200\n memory = 1024\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. 
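The group requests the host volume prod-volume-data1-1 and the task mounts it read-write at /data/. For that placement to succeed, every eligible Nomad client has to advertise a host_volume with the same name in its agent configuration; a sketch of the client side, with a hypothetical host path:

  client {
    host_volume "prod-volume-data1-1" {
      path      = "/data"   # hypothetical path on the client host
      read_only = false
    }
  }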
Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = 9000\n }\n }\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5890717", + "name": "prod-minio", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 4, + "meta": {}, + "name": "prod-group1-minio", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-minio", + "volume_mounts": [ + { + "destination": "/data/", + "read_only": false, + "volume": "prod-volume1-minio" + } + ] + } + ], + "volumes": [ + { + "name": "prod-volume1-minio", + "read_only": false, + "source": "prod-volume-data1-1", + "type": "host" + } + ] + } + ], + "type": "service" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.minio.data.template_file.nomad_job_minio" + ] + } + ] + }, + { + "module": "module.nginx", + "mode": "data", + "type": "template_file", + "name": "nomad_job_nginx", + "provider": "provider[\"registry.terraform.io/hashicorp/template\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "b3a1010354c485039fd14f63d271c8a72f805d16d29377fe1883aa0b233c92f4", + "rendered": "job \"prod-nginx\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 0\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. 
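Each jobspec is registered through a nomad_job resource, and Terraform records allocation IDs, deployment_id, and deployment_status as computed attributes, which is what the blocks above capture. One small wrinkle worth flagging: the rendered jobs carry datacenters = "yul1" as a bare string; the string form evidently parses here, but Nomad's documented form is a list such as ["yul1"]. A sketch of the data source plus resource wiring as the module structures it (file path hypothetical):

  data "template_file" "nomad_job_minio" {
    template = file("${path.module}/conf/nomad/minio.hcl")  # hypothetical path
    vars = {
      job_name    = "prod-minio"
      datacenters = "yul1"
      group_count = "4"
    }
  }

  resource "nomad_job" "nomad_job_minio" {
    jobspec = data.template_file.nomad_job_minio.rendered
  }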
If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The reschedule stanza specifies the group's rescheduling strategy. If\n # specified at the job level, the configuration will apply to all groups\n # within the job. If the reschedule stanza is present on both the job and the\n # group, they are merged with the group stanza taking the highest precedence\n # and then the job.\n reschedule {\n delay = \"30s\"\n delay_function = \"constant\"\n unlimited = true\n }\n\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-nginx\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The restart stanza configures a tasks's behavior on task failure. Restarts\n # happen on the client that is running the task.\n restart {\n interval = \"10m\"\n attempts = 2\n delay = \"15s\"\n mode = \"fail\"\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"false\"\n }\n\n # The volume stanza allows the group to specify that it requires a given\n # volume from the cluster.\n #\n # For more information and examples on the \"volume\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/volume\n volume \"prod-volume1-storage\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-nginx\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n volume_mount {\n volume = \"prod-volume1-storage\"\n destination = \"/data/\"\n read_only = true\n }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
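The nginx job layers two failure policies: restart retries a failed task in place on the same client (two attempts per 10-minute window, after which mode "fail" gives up locally), and reschedule then moves the allocation to another client every 30 seconds, without limit. Condensed:

  restart {
    interval = "10m"
    attempts = 2
    delay    = "15s"
    mode     = "fail"   # stop local retries; hand over to reschedule
  }

  reschedule {
    delay          = "30s"
    delay_function = "constant"
    unlimited      = true
  }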
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"nginx:stable\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n port_map {\n https = 443\n }\n privileged = false\n volumes = [\n \"/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem\",\n \"/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem\",\n \"custom/upstream.conf:/etc/nginx/conf.d/upstream.conf\",\n \"custom/logs.conf:/etc/nginx/conf.d/logs.conf\",\n \"custom/docs.conf:/etc/nginx/conf.d/docs.conf\"\n ]\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/template.html\n #\n template {\n data = \u003c\u003cEOH\n upstream storage {\n server storage0.storage.service.consul:9000;\n server storage1.storage.service.consul:9000;\n server storage2.storage.service.consul:9000;\n server storage3.storage.service.consul:9000;\n }\n EOH\n destination = \"custom/upstream.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl default_server;\n server_name logs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/;\n server_name_in_redirect off;\n }\n location ~ (.*html.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/html;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*txt.gz|.*log.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/plain;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*xml.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type application/xml;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/logs.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl;\n server_name docs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers 
\"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/docs.fd.io/;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/docs.conf\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"nginx\"\n port = \"https\"\n tags = [ \"docs\", \"logs\" ]\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 1000\n memory = 1024\n network {\n mode = \"bridge\"\n port \"https\" {\n static = 443\n }\n }\n }\n }\n }\n}", + "template": "job \"${job_name}\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"${datacenters}\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 0\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. 
If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The reschedule stanza specifies the group's rescheduling strategy. If\n # specified at the job level, the configuration will apply to all groups\n # within the job. If the reschedule stanza is present on both the job and the\n # group, they are merged with the group stanza taking the highest precedence\n # and then the job.\n reschedule {\n delay = \"30s\"\n delay_function = \"constant\"\n unlimited = true\n }\n\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-nginx\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The restart stanza configures a tasks's behavior on task failure. Restarts\n # happen on the client that is running the task.\n restart {\n interval = \"10m\"\n attempts = 2\n delay = \"15s\"\n mode = \"fail\"\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"false\"\n }\n\n # The volume stanza allows the group to specify that it requires a given\n # volume from the cluster.\n #\n # For more information and examples on the \"volume\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/volume\n volume \"prod-volume1-storage\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-nginx\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n volume_mount {\n volume = \"prod-volume1-storage\"\n destination = \"/data/\"\n read_only = true\n }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"nginx:stable\"\n dns_servers = [ \"$${attr.unique.network.ip-address}\" ]\n port_map {\n https = 443\n }\n privileged = false\n volumes = [\n \"/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem\",\n \"/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem\",\n \"custom/upstream.conf:/etc/nginx/conf.d/upstream.conf\",\n \"custom/logs.conf:/etc/nginx/conf.d/logs.conf\",\n \"custom/docs.conf:/etc/nginx/conf.d/docs.conf\"\n ]\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/template.html\n #\n template {\n data = \u003c\u003cEOH\n upstream storage {\n server storage0.storage.service.consul:9000;\n server storage1.storage.service.consul:9000;\n server storage2.storage.service.consul:9000;\n server storage3.storage.service.consul:9000;\n }\n EOH\n destination = \"custom/upstream.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl default_server;\n server_name logs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/;\n server_name_in_redirect off;\n }\n location ~ (.*html.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/html;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*txt.gz|.*log.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/plain;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*xml.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type application/xml;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/logs.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl;\n server_name docs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers 
\"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/docs.fd.io/;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/docs.conf\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"nginx\"\n port = \"https\"\n tags = [ \"docs\", \"logs\" ]\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 1000\n memory = 1024\n network {\n mode = \"bridge\"\n port \"https\" {\n static = 443\n }\n }\n }\n }\n }\n}", + "vars": { + "datacenters": "yul1", + "job_name": "prod-nginx" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.nginx", + "mode": "managed", + "type": "nomad_job", + "name": "nomad_job_nginx", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "de9af589-8f0b-6634-7199-c661f8f17df0", + "6d09eda3-d917-51f8-1d75-619f4f8088ec" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": "", + "deployment_status": "", + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": false, + "id": "prod-nginx", + "jobspec": "job \"prod-nginx\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. 
In this case, this specifies to update a single task\n # at a time.\n max_parallel = 0\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The reschedule stanza specifies the group's rescheduling strategy. If\n # specified at the job level, the configuration will apply to all groups\n # within the job. If the reschedule stanza is present on both the job and the\n # group, they are merged with the group stanza taking the highest precedence\n # and then the job.\n reschedule {\n delay = \"30s\"\n delay_function = \"constant\"\n unlimited = true\n }\n\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-nginx\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The restart stanza configures a tasks's behavior on task failure. 
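One inconsistency worth noting in the nginx job: the constraint is introduced with the comment "All groups in this job should be scheduled on different hosts", yet it sets value = "false", which disables distinct_hosts (the minio job sets "true"). The enforcing form would be:

  constraint {
    operator = "distinct_hosts"
    value    = "true"
  }

With a single-count nginx group the constraint has no effect either way, which may be why the value was left disabled.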
Restarts\n # happen on the client that is running the task.\n restart {\n interval = \"10m\"\n attempts = 2\n delay = \"15s\"\n mode = \"fail\"\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"false\"\n }\n\n # The volume stanza allows the group to specify that it requires a given\n # volume from the cluster.\n #\n # For more information and examples on the \"volume\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/volume\n volume \"prod-volume1-storage\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-nginx\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n volume_mount {\n volume = \"prod-volume1-storage\"\n destination = \"/data/\"\n read_only = true\n }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"nginx:stable\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n port_map {\n https = 443\n }\n privileged = false\n volumes = [\n \"/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem\",\n \"/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem\",\n \"custom/upstream.conf:/etc/nginx/conf.d/upstream.conf\",\n \"custom/logs.conf:/etc/nginx/conf.d/logs.conf\",\n \"custom/docs.conf:/etc/nginx/conf.d/docs.conf\"\n ]\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. 
This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/template.html\n #\n template {\n data = \u003c\u003cEOH\n upstream storage {\n server storage0.storage.service.consul:9000;\n server storage1.storage.service.consul:9000;\n server storage2.storage.service.consul:9000;\n server storage3.storage.service.consul:9000;\n }\n EOH\n destination = \"custom/upstream.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl default_server;\n server_name logs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/;\n server_name_in_redirect off;\n }\n location ~ (.*html.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/html;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*txt.gz|.*log.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/plain;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*xml.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type application/xml;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/logs.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl;\n server_name docs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/docs.fd.io/;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/docs.conf\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n 
#\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"nginx\"\n port = \"https\"\n tags = [ \"docs\", \"logs\" ]\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 1000\n memory = 1024\n network {\n mode = \"bridge\"\n port \"https\" {\n static = 443\n }\n }\n }\n }\n }\n}", + "json": null, + "modify_index": "5890718", + "name": "prod-nginx", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "prod-group1-nginx", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-nginx", + "volume_mounts": [ + { + "destination": "/data/", + "read_only": true, + "volume": "prod-volume1-storage" + } + ] + } + ], + "volumes": [ + { + "name": "prod-volume1-storage", + "read_only": false, + "source": "prod-volume-data1-1", + "type": "host" + } + ] + } + ], + "type": "service" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.nginx.data.template_file.nomad_job_nginx" + ] + } + ] + } + ] +} diff --git a/terraform-ci-infra/1n_nmd/terraform.tfstate.backup b/terraform-ci-infra/1n_nmd/terraform.tfstate.backup new file mode 100644 index 0000000000..ace1fbd01b --- /dev/null +++ b/terraform-ci-infra/1n_nmd/terraform.tfstate.backup @@ -0,0 +1,281 @@ +{ + "version": 4, + "terraform_version": "0.14.0", + "serial": 211, + "lineage": "e4e7f30a-652d-7a31-e31c-5e3a3388c9b9", + "outputs": {}, + "resources": [ + { + "module": "module.prod_storage", + "mode": "data", + "type": "template_file", + "name": "nomad_job_mc", + "provider": "provider[\"registry.terraform.io/hashicorp/template\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "9afc30ab9aba445c712a1efa0c41476e18825f4b172ff31046c0d4f735d498e5", + "rendered": "job \"prod-mc\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. 
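Networking differs between the two jobs: minio runs with network_mode = "host" and a static port 9000, while nginx runs in bridge mode with a static 443 declared inside the task's resources. The nginx task also bind-mounts the Consul TLS material and the rendered conf files through the Docker driver's volumes list, which requires the docker.volumes.enabled client option. The network piece, condensed:

  resources {
    cpu    = 1000
    memory = 1024
    network {
      mode = "bridge"
      port "https" {
        static = 443   # published on whichever client the group lands on
      }
    }
  }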
Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/mc:RELEASE.2020-12-10T01-26-17Z\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io\"\n ]\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n \n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n \n \n }\n }\n }\n}\n", + "template": "job \"${job_name}\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"${datacenters}\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n %{ if use_vault_provider }\n vault {\n policies = \"${vault_kv_policy_name}\"\n }\n %{ endif }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"${image}\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"${command}\"\n ]\n dns_servers = [ \"$${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n %{ if use_vault_provider }\n {{ with secret \"${vault_kv_path}\" }}\n MINIO_ACCESS_KEY = \"{{ .Data.data.${vault_kv_field_access_key} }}\"\n MINIO_SECRET_KEY = \"{{ .Data.data.${vault_kv_field_secret_key} }}\"\n {{ end }}\n %{ else }\n MINIO_ACCESS_KEY = \"${access_key}\"\n MINIO_SECRET_KEY = \"${secret_key}\"\n %{ endif }\n ${ envs }\n }\n }\n }\n}\n", + "vars": { + "access_key": "minio", + "command": "mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io", + "datacenters": "yul1", + "envs": "", + "image": "minio/mc:RELEASE.2020-12-10T01-26-17Z", + "job_name": "prod-mc", + "minio_port": "9000", + "minio_service_name": "storage", + "secret_key": "minio123", + "service_name": "mc", + "use_vault_provider": "false" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.prod_storage", + "mode": "data", + "type": "template_file", + "name": "nomad_job_minio", + "provider": "provider[\"registry.terraform.io/hashicorp/template\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "filename": null, + "id": "7316d70d802832981e08dc3c9b6b9637d8998e7cf173786b341f788dc513512b", + "rendered": "job \"prod-minio\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. 
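The mc template above also carries the optional Vault branch: with use_vault_provider enabled, a vault stanza attaches a KV policy and the access keys are pulled from a secret instead of being passed as plain template vars. As recorded, the {{ with secret }} actions are emitted inside the env block, which only stays harmless while use_vault_provider remains "false" (and the vault stanza's policies argument takes a list, whereas the template interpolates a bare string). Nomad's conventional mechanism is a template stanza with env = true; a hedged sketch of that form, with the KV path, policy, and field names hypothetical:

  vault {
    policies = ["kv-minio-read"]   # hypothetical policy name
  }

  template {
    destination = "secrets/minio.env"
    env         = true             # export rendered KEY=value pairs as env vars
    data        = <<EOH
  MINIO_ACCESS_KEY={{ with secret "kv/data/minio" }}{{ .Data.data.access_key }}{{ end }}
  MINIO_SECRET_KEY={{ with secret "kv/data/minio" }}{{ .Data.data.secret_key }}{{ end }}
  EOH
  }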
The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 4\n\n # https://www.nomadproject.io/docs/job-specification/volume\n \n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n \n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"/data/\"\n read_only = false\n }\n \n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/minio:RELEASE.2020-12-03T05-49-24Z\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"http://10.32.8.1{4...7}:9000/data/\" ]\n port_map {\n http = 9000\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n\n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n\n MINIO_BROWSER=\"off\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"storage\"\n port = \"http\"\n tags = [ \"storage${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 200\n memory = 1024\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = 9000\n }\n }\n }\n }\n }\n}\n", + "template": "job \"${job_name}\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"${datacenters}\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. 
In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n%{ if use_canary }\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n%{ endif }\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. 
This value must be non-negative and defaults\n # to 1.\n count = ${group_count}\n\n # https://www.nomadproject.io/docs/job-specification/volume\n %{ if use_host_volume }\n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"${host_volume}\"\n }\n %{ endif }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n %{ if use_host_volume }\n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"${data_dir}\"\n read_only = false\n }\n %{ endif }\n\n %{ if use_vault_provider }\n vault {\n policies = \"${vault_kv_policy_name}\"\n }\n %{ endif }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"${image}\"\n dns_servers = [ \"$${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"${host}:${port}${data_dir}\" ]\n port_map {\n http = ${port}\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n%{ if use_vault_provider }\n{{ with secret \"${vault_kv_path}\" }}\n MINIO_ACCESS_KEY = \"{{ .Data.data.${vault_kv_field_access_key} }}\"\n MINIO_SECRET_KEY = \"{{ .Data.data.${vault_kv_field_secret_key} }}\"\n{{ end }}\n%{ else }\n MINIO_ACCESS_KEY = \"${access_key}\"\n MINIO_SECRET_KEY = \"${secret_key}\"\n%{ endif }\n ${ envs }\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"${service_name}\"\n port = \"http\"\n tags = [ \"storage$${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = ${cpu}\n memory = ${memory}\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. 
Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = ${port}\n }\n }\n }\n }\n }\n}\n", + "vars": { + "access_key": "minio", + "cpu": "200", + "cpu_proxy": "200", + "data_dir": "/data/", + "datacenters": "yul1", + "envs": "MINIO_BROWSER=\"off\"", + "group_count": "4", + "host": "http://10.32.8.1{4...7}", + "host_volume": "prod-volume-data1-1", + "image": "minio/minio:RELEASE.2020-12-03T05-49-24Z", + "job_name": "prod-minio", + "memory": "1024", + "memory_proxy": "128", + "port": "9000", + "secret_key": "minio123", + "service_name": "storage", + "upstreams": "[]", + "use_canary": "true", + "use_host_volume": "true", + "use_vault_provider": "false" + } + }, + "sensitive_attributes": [] + } + ] + }, + { + "module": "module.prod_storage", + "mode": "managed", + "type": "nomad_job", + "name": "nomad_job_mc", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "1b0598ea-3d27-371d-5368-c0d0642eb759", + "44dfb610-ff7d-b585-59a5-ee34372fcf92", + "9f94eb7e-9bde-b8b1-e963-99d516165921", + "a4bb7b9d-eee3-e0d0-5e92-f50ea7e35e89", + "b8e98991-25ac-eb64-b1f9-36c3d4e54fa8" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": "", + "deployment_status": "", + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": false, + "id": "prod-mc", + "jobspec": "job \"prod-mc\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"batch\"\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-mc\" {\n task \"prod-task1-create-buckets\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. 
The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/mc:RELEASE.2020-12-10T01-26-17Z\"\n entrypoint = [\n \"/bin/sh\",\n \"-c\",\n \"mc config host add LOCALMINIO http://storage.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY \u0026\u0026 mc mb -p LOCALMINIO/logs.fd.io LOCALMINIO/docs.fd.io ; mc policy set public LOCALMINIO/logs.fd.io mc policy set public LOCALMINIO/docs.fd.io\"\n ]\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n \n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n \n \n }\n }\n }\n}\n", + "json": null, + "modify_index": "5880908", + "name": "prod-mc", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "prod-group1-mc", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-create-buckets", + "volume_mounts": [] + } + ], + "volumes": [] + } + ], + "type": "batch" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.prod_storage.data.template_file.nomad_job_mc", + "module.prod_storage.nomad_job.nomad_job_minio" + ] + } + ] + }, + { + "module": "module.prod_storage", + "mode": "managed", + "type": "nomad_job", + "name": "nomad_job_minio", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "0ead2172-72ee-c52e-abd2-d9a6f100d27c", + "476369b7-d15d-19ec-25a7-2f46da76eeb6", + "885b8e38-cb0e-93ba-cb68-da9e2d7bbeb7", + "ac621157-f496-d868-8396-c2a463007bb6" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": "a41d0bf5-d89b-19fa-9ed3-b8d39d6e879e", + "deployment_status": "successful", + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": false, + "id": "prod-minio", + "jobspec": "job \"prod-minio\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = \"yul1\"\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. 
In this case, this specifies to update a single task\n # at a time.\n max_parallel = 1\n\n health_check = \"checks\"\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 1\n\n # Specifies if the job should auto-promote to the canary version when all\n # canaries become healthy during a deployment. Defaults to false which means\n # canaries must be manually updated with the nomad deployment promote\n # command.\n auto_promote = true\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = true\n\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"true\"\n }\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-minio\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. 
This value must be non-negative and defaults\n # to 1.\n count = 4\n\n # https://www.nomadproject.io/docs/job-specification/volume\n \n volume \"prod-volume1-minio\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n \n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-minio\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n \n volume_mount {\n volume = \"prod-volume1-minio\"\n destination = \"/data/\"\n read_only = false\n }\n \n\n \n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"minio/minio:RELEASE.2020-12-03T05-49-24Z\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n network_mode = \"host\"\n command = \"server\"\n args = [ \"http://10.32.8.1{4...7}:9000/data/\" ]\n port_map {\n http = 9000\n }\n privileged = false\n }\n\n # The env stanza configures a list of environment variables to populate\n # the task's environment before starting.\n env {\n\n MINIO_ACCESS_KEY = \"minio\"\n MINIO_SECRET_KEY = \"minio123\"\n\n MINIO_BROWSER=\"off\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"storage\"\n port = \"http\"\n tags = [ \"storage${NOMAD_ALLOC_INDEX}\" ]\n check {\n name = \"Min.io Server HTTP Check Live\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/live\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n check {\n name = \"Min.io Server HTTP Check Ready\"\n type = \"http\"\n port = \"http\"\n protocol = \"http\"\n method = \"GET\"\n path = \"/minio/health/ready\"\n interval = \"10s\"\n timeout = \"2s\"\n }\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 200\n memory = 1024\n # The network stanza specifies the networking requirements for the task\n # group, including the network mode and port allocations. When scheduling\n # jobs in Nomad they are provisioned across your fleet of machines along\n # with other jobs and services. 
Because you don't know in advance what host\n # your job will be provisioned on, Nomad will provide your tasks with\n # network configuration when they start up.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/network.html\n #\n network {\n port \"http\" {\n static = 9000\n }\n }\n }\n }\n }\n}\n", + "json": null, + "modify_index": "5880421", + "name": "prod-minio", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 4, + "meta": {}, + "name": "prod-group1-minio", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-minio", + "volume_mounts": [ + { + "destination": "/data/", + "read_only": false, + "volume": "prod-volume1-minio" + } + ] + } + ], + "volumes": [ + { + "name": "prod-volume1-minio", + "read_only": false, + "source": "prod-volume-data1-1", + "type": "host" + } + ] + } + ], + "type": "service" + }, + "sensitive_attributes": [], + "private": "bnVsbA==", + "dependencies": [ + "module.prod_storage.data.template_file.nomad_job_minio" + ] + } + ] + }, + { + "module": "module.prod_storage", + "mode": "managed", + "type": "nomad_job", + "name": "prod_nginx", + "provider": "provider[\"registry.terraform.io/hashicorp/nomad\"].yul1", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allocation_ids": [ + "6d09eda3-d917-51f8-1d75-619f4f8088ec" + ], + "datacenters": [ + "yul1" + ], + "deployment_id": null, + "deployment_status": null, + "deregister_on_destroy": true, + "deregister_on_id_change": true, + "detach": true, + "id": "prod-nginx", + "jobspec": "job \"prod-nginx\" {\n # The \"region\" parameter specifies the region in which to execute the job.\n # If omitted, this inherits the default region name of \"global\".\n # region = \"global\"\n #\n # The \"datacenters\" parameter specifies the list of datacenters which should\n # be considered when placing this task. This must be provided.\n datacenters = [ \"yul1\" ]\n\n # The \"type\" parameter controls the type of job, which impacts the scheduler's\n # decision on placement. This configuration is optional and defaults to\n # \"service\". For a full list of job types and their differences, please see\n # the online documentation.\n #\n # For more information, please see the online documentation at:\n #\n # https://www.nomadproject.io/docs/jobspec/schedulers.html\n #\n type = \"service\"\n\n update {\n # The \"max_parallel\" parameter specifies the maximum number of updates to\n # perform in parallel. In this case, this specifies to update a single task\n # at a time.\n max_parallel = 0\n\n # The \"min_healthy_time\" parameter specifies the minimum time the allocation\n # must be in the healthy state before it is marked as healthy and unblocks\n # further allocations from being updated.\n min_healthy_time = \"10s\"\n\n # The \"healthy_deadline\" parameter specifies the deadline in which the\n # allocation must be marked as healthy after which the allocation is\n # automatically transitioned to unhealthy. Transitioning to unhealthy will\n # fail the deployment and potentially roll back the job if \"auto_revert\" is\n # set to true.\n healthy_deadline = \"3m\"\n\n # The \"progress_deadline\" parameter specifies the deadline in which an\n # allocation must be marked as healthy. 
The deadline begins when the first\n # allocation for the deployment is created and is reset whenever an allocation\n # as part of the deployment transitions to a healthy state. If no allocation\n # transitions to the healthy state before the progress deadline, the\n # deployment is marked as failed.\n progress_deadline = \"10m\"\n\n # The \"auto_revert\" parameter specifies if the job should auto-revert to the\n # last stable job on deployment failure. A job is marked as stable if all the\n # allocations as part of its deployment were marked healthy.\n auto_revert = false\n\n # The \"canary\" parameter specifies that changes to the job that would result\n # in destructive updates should create the specified number of canaries\n # without stopping any previous allocations. Once the operator determines the\n # canaries are healthy, they can be promoted which unblocks a rolling update\n # of the remaining allocations at a rate of \"max_parallel\".\n #\n # Further, setting \"canary\" equal to the count of the task group allows\n # blue/green deployments. When the job is updated, a full set of the new\n # version is deployed and upon promotion the old version is stopped.\n canary = 0\n }\n\n # The reschedule stanza specifies the group's rescheduling strategy. If\n # specified at the job level, the configuration will apply to all groups\n # within the job. If the reschedule stanza is present on both the job and the\n # group, they are merged with the group stanza taking the highest precedence\n # and then the job.\n reschedule {\n delay = \"30s\"\n delay_function = \"constant\"\n unlimited = true\n }\n\n\n # The \"group\" stanza defines a series of tasks that should be co-located on\n # the same Nomad client. Any task within a group will be placed on the same\n # client.\n #\n # For more information and examples on the \"group\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/group.html\n #\n group \"prod-group1-nginx\" {\n # The \"count\" parameter specifies the number of the task groups that should\n # be running under this group. This value must be non-negative and defaults\n # to 1.\n count = 1\n\n # The restart stanza configures a tasks's behavior on task failure. 
Restarts\n # happen on the client that is running the task.\n restart {\n interval = \"10m\"\n attempts = 2\n delay = \"15s\"\n mode = \"fail\"\n }\n\n # All groups in this job should be scheduled on different hosts.\n constraint {\n operator = \"distinct_hosts\"\n value = \"false\"\n }\n\n # The volume stanza allows the group to specify that it requires a given\n # volume from the cluster.\n #\n # For more information and examples on the \"volume\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/volume\n volume \"prod-volume1-storage\" {\n type = \"host\"\n read_only = false\n source = \"prod-volume-data1-1\"\n }\n\n # The \"task\" stanza creates an individual unit of work, such as a Docker\n # container, web application, or batch processing.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/task.html\n #\n task \"prod-task1-nginx\" {\n # The \"driver\" parameter specifies the task driver that should be used to\n # run the task.\n driver = \"docker\"\n\n volume_mount {\n volume = \"prod-volume1-storage\"\n destination = \"/data/\"\n read_only = true\n }\n\n # The \"config\" stanza specifies the driver configuration, which is passed\n # directly to the driver to start the task. The details of configurations\n # are specific to each driver, so please see specific driver\n # documentation for more information.\n config {\n image = \"nginx:stable\"\n dns_servers = [ \"${attr.unique.network.ip-address}\" ]\n port_map {\n https = 443\n }\n privileged = false\n volumes = [\n \"/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem\",\n \"/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem\",\n \"custom/upstream.conf:/etc/nginx/conf.d/upstream.conf\",\n \"custom/logs.conf:/etc/nginx/conf.d/logs.conf\",\n \"custom/docs.conf:/etc/nginx/conf.d/docs.conf\"\n ]\n }\n\n # The \"template\" stanza instructs Nomad to manage a template, such as\n # a configuration file or script. 
This template can optionally pull data\n # from Consul or Vault to populate runtime configuration data.\n #\n # For more information and examples on the \"template\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/template.html\n #\n template {\n data = \u003c\u003cEOH\n upstream storage {\n server storage0.storage.service.consul:9000;\n server storage1.storage.service.consul:9000;\n server storage2.storage.service.consul:9000;\n server storage3.storage.service.consul:9000;\n }\n EOH\n destination = \"custom/upstream.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl default_server;\n server_name logs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/;\n server_name_in_redirect off;\n }\n location ~ (.*html.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/html;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*txt.gz|.*log.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type text/plain;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n location ~ (.*xml.gz)$ {\n add_header Content-Encoding gzip;\n add_header Content-Type application/xml;\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/logs.fd.io/$1;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/logs.conf\"\n }\n template {\n data = \u003c\u003cEOH\n server {\n listen 443 ssl;\n server_name docs.nginx.service.consul;\n keepalive_timeout 70;\n ssl_session_cache shared:SSL:10m;\n ssl_session_timeout 10m;\n ssl_protocols TLSv1.2;\n ssl_prefer_server_ciphers on;\n ssl_ciphers \"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384\";\n ssl_certificate /etc/ssl/certs/nginx-cert.pem;\n ssl_certificate_key /etc/ssl/private/nginx-key.pem;\n location / {\n chunked_transfer_encoding off;\n proxy_connect_timeout 300;\n proxy_http_version 1.1;\n proxy_set_header Host $host:$server_port;\n proxy_set_header Connection \"\";\n proxy_pass http://storage/docs.fd.io/;\n server_name_in_redirect off;\n }\n }\n EOH\n destination = \"custom/docs.conf\"\n }\n\n # The service stanza instructs Nomad to register a service with Consul.\n #\n # For more information and examples on the \"task\" stanza, please see\n # the online documentation at:\n 
#\n # https://www.nomadproject.io/docs/job-specification/service.html\n #\n service {\n name = \"nginx\"\n port = \"https\"\n tags = [ \"docs\", \"logs\" ]\n }\n\n # The \"resources\" stanza describes the requirements a task needs to\n # execute. Resource requirements include memory, network, cpu, and more.\n # This ensures the task will execute on a machine that contains enough\n # resource capacity.\n #\n # For more information and examples on the \"resources\" stanza, please see\n # the online documentation at:\n #\n # https://www.nomadproject.io/docs/job-specification/resources.html\n #\n resources {\n cpu = 1000\n memory = 1024\n network {\n mode = \"bridge\"\n port \"https\" {\n static = 443\n }\n }\n }\n }\n }\n}", + "json": null, + "modify_index": "5865884", + "name": "prod-nginx", + "namespace": "default", + "policy_override": null, + "purge_on_destroy": null, + "region": "global", + "task_groups": [ + { + "count": 1, + "meta": {}, + "name": "prod-group1-nginx", + "task": [ + { + "driver": "docker", + "meta": {}, + "name": "prod-task1-nginx", + "volume_mounts": [ + { + "destination": "/data/", + "read_only": true, + "volume": "prod-volume1-storage" + } + ] + } + ], + "volumes": [ + { + "name": "prod-volume1-storage", + "read_only": false, + "source": "prod-volume-data1-1", + "type": "host" + } + ] + } + ], + "type": "service" + }, + "sensitive_attributes": [], + "private": "bnVsbA==" + } + ] + } + ] +} diff --git a/terraform-ci-infra/1n_nmd/tools/artifacts.py b/terraform-ci-infra/1n_nmd/tools/artifacts.py new file mode 100755 index 0000000000..36bef7c5c6 --- /dev/null +++ b/terraform-ci-infra/1n_nmd/tools/artifacts.py @@ -0,0 +1,138 @@ +#!/usr/bin/python3 + +# Copyright (c) 2020 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Storage utilities library.""" + +import argparse +import gzip +import os +from mimetypes import MimeTypes + +from boto3 import resource +from botocore.client import Config + + +ENDPOINT_URL = u"http://storage.service.consul:9000" +AWS_ACCESS_KEY_ID = u"storage" +AWS_SECRET_ACCESS_KEY = u"Storage1234" +REGION_NAME = u"yul1" +COMPRESS_MIME = ( + u"text/html", + u"text/xml", + u"application/octet-stream" +) + + +def compress(src_fpath): + """Compress a single file. + + :param src_fpath: Input file path. + :type src_fpath: str + """ + with open(src_fpath, u"rb") as orig_file: + with gzip.open(f"{src_fpath}.gz", u"wb") as zipped_file: + zipped_file.writelines(orig_file) + + +def upload(storage, bucket, src_fpath, dst_fpath): + """Upload single file to destination bucket. + + :param storage: S3 storage resource. + :param bucket: S3 bucket name. + :param src_fpath: Input file path. + :param dst_fpath: Destination file path on remote storage. 
+    :type storage: Object
+    :type bucket: str
+    :type src_fpath: str
+    :type dst_fpath: str
+    """
+    mime = MimeTypes().guess_type(src_fpath)[0]
+    if not mime:
+        mime = u"application/octet-stream"
+
+    if mime in COMPRESS_MIME and bucket == u"logs":
+        compress(src_fpath)
+        src_fpath = f"{src_fpath}.gz"
+        dst_fpath = f"{dst_fpath}.gz"
+
+    storage.Bucket(f"{bucket}.fd.io").upload_file(
+        src_fpath,
+        dst_fpath,
+        ExtraArgs={
+            u"ContentType": mime
+        }
+    )
+    print(f"https://{bucket}.nginx.service.consul/{dst_fpath}")
+
+
+def upload_recursive(storage, bucket, src_fpath):
+    """Recursively upload input folder to destination bucket.
+
+    Example:
+      - bucket: logs
+      - src_fpath: /home/user
+      - dst_fpath: logs.fd.io/home/user
+
+    :param storage: S3 storage resource.
+    :param bucket: S3 bucket name.
+    :param src_fpath: Input folder path.
+    :type storage: Object
+    :type bucket: str
+    :type src_fpath: str
+    """
+    for path, _, files in os.walk(src_fpath):
+        for file in files:
+            _path = path.replace(src_fpath, u"")
+            _dir = src_fpath[1:] if src_fpath[0] == u"/" else src_fpath
+            _dst_fpath = os.path.normpath(f"{_dir}/{_path}/{file}")
+            _src_fpath = os.path.join(path, file)
+            upload(storage, bucket, _src_fpath, _dst_fpath)
+
+
+def main():
+    """Main function for storage manipulation."""
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        u"-d", u"--dir", required=True, type=str,
+        help=u"Directory to upload to storage."
+    )
+    parser.add_argument(
+        u"-b", u"--bucket", required=True, type=str,
+        help=u"Target bucket on storage."
+    )
+    args = parser.parse_args()
+
+    # Create main storage resource.
+    storage = resource(
+        u"s3",
+        endpoint_url=ENDPOINT_URL,
+        aws_access_key_id=AWS_ACCESS_KEY_ID,
+        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
+        config=Config(
+            signature_version=u"s3v4"
+        ),
+        region_name=REGION_NAME
+    )
+
+    upload_recursive(
+        storage=storage,
+        bucket=args.bucket,
+        src_fpath=args.dir
+    )
+
+
+if __name__ == u"__main__":
+    main()
diff --git a/resources/tools/terraform/1n_nmd/variables.tf b/terraform-ci-infra/1n_nmd/variables.tf
index 0782f8669c..a575d0b51d 100644
--- a/resources/tools/terraform/1n_nmd/variables.tf
+++ b/terraform-ci-infra/1n_nmd/variables.tf
@@ -2,4 +2,10 @@ variable "nomad_provider_address" {
   description = "FD.io Nomad cluster address."
   type = string
   default = "http://nomad.service.consul:4646"
+}
+
+variable "nomad_acl" {
+  description = "Nomad ACLs enabled/disabled"
+  type = bool
+  default = false
 }
\ No newline at end of file diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad b/terraform-ci-infra/1n_nmd/vpp_device/prod_csit_shim.nomad index 328f503a0b..328f503a0b 100644 --- a/resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad +++ b/terraform-ci-infra/1n_nmd/vpp_device/prod_csit_shim.nomad diff --git a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf b/terraform-ci-infra/1n_nmd/vpp_device/resources.tf index dace9094f2..dace9094f2 100644 --- a/resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf +++ b/terraform-ci-infra/1n_nmd/vpp_device/resources.tf diff --git a/resources/tools/terraform/1n_nmd/.gitignore b/terraform-ci-infra/2n_aws_c5n/.gitignore index fc64f0039f..fc64f0039f 100644 --- a/resources/tools/terraform/1n_nmd/.gitignore +++ b/terraform-ci-infra/2n_aws_c5n/.gitignore diff --git a/resources/tools/terraform/2n_aws_c5n/main.tf b/terraform-ci-infra/2n_aws_c5n/main.tf index c0da7a487e..c0da7a487e 100644 --- a/resources/tools/terraform/2n_aws_c5n/main.tf +++ b/terraform-ci-infra/2n_aws_c5n/main.tf diff --git a/resources/tools/terraform/2n_aws_c5n/nic.tf b/terraform-ci-infra/2n_aws_c5n/nic.tf index b0a54e9b98..b0a54e9b98 100644 --- a/resources/tools/terraform/2n_aws_c5n/nic.tf +++ b/terraform-ci-infra/2n_aws_c5n/nic.tf diff --git a/resources/tools/terraform/2n_aws_c5n/.gitignore b/terraform-ci-infra/3n_aws_c5n/.gitignore index fc64f0039f..fc64f0039f 100644 --- a/resources/tools/terraform/2n_aws_c5n/.gitignore +++ b/terraform-ci-infra/3n_aws_c5n/.gitignore diff --git a/resources/tools/terraform/3n_aws_c5n/main.tf b/terraform-ci-infra/3n_aws_c5n/main.tf index 9ba2b19abe..9ba2b19abe 100644 --- a/resources/tools/terraform/3n_aws_c5n/main.tf +++ b/terraform-ci-infra/3n_aws_c5n/main.tf diff --git a/resources/tools/terraform/3n_aws_c5n/nic.tf b/terraform-ci-infra/3n_aws_c5n/nic.tf index 3efd74fc14..3efd74fc14 100644 --- a/resources/tools/terraform/3n_aws_c5n/nic.tf +++ b/terraform-ci-infra/3n_aws_c5n/nic.tf diff --git a/resources/tools/terraform/3n_aws_c5n/.gitignore b/terraform-ci-infra/3n_azure_fsv2/.gitignore index fc64f0039f..fc64f0039f 100644 --- a/resources/tools/terraform/3n_aws_c5n/.gitignore +++ b/terraform-ci-infra/3n_azure_fsv2/.gitignore diff --git a/resources/tools/terraform/3n_azure_fsv2/main.tf b/terraform-ci-infra/3n_azure_fsv2/main.tf index 9f6739e676..9f6739e676 100644 --- a/resources/tools/terraform/3n_azure_fsv2/main.tf +++ b/terraform-ci-infra/3n_azure_fsv2/main.tf diff --git a/resources/tools/terraform/3n_azure_fsv2/nic.tf b/terraform-ci-infra/3n_azure_fsv2/nic.tf index 51692593c6..51692593c6 100644 --- a/resources/tools/terraform/3n_azure_fsv2/nic.tf +++ b/terraform-ci-infra/3n_azure_fsv2/nic.tf |
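For reference, a minimal usage sketch for the new upload helper, plus a boto3 read-back under the same assumptions. The directory, bucket, and object key below are hypothetical examples; the endpoint, credentials, region, and signature settings are the constants defined in artifacts.py above:

    # Upload a directory tree into the logs bucket (hypothetical path):
    #
    #   python3 terraform-ci-infra/1n_nmd/tools/artifacts.py --dir /tmp/archive --bucket logs
    #
    # Reading an object back from the MinIO cluster with boto3, using the
    # same endpoint, credentials, and signature version as artifacts.py:

    from boto3 import resource
    from botocore.client import Config

    storage = resource(
        u"s3",
        endpoint_url=u"http://storage.service.consul:9000",
        aws_access_key_id=u"storage",
        aws_secret_access_key=u"Storage1234",
        config=Config(signature_version=u"s3v4"),
        region_name=u"yul1"
    )

    # "tmp/archive/report.html.gz" is a hypothetical object key; the upload
    # helper gzip-compresses eligible text types for the logs bucket, hence
    # the .gz suffix and the Content-Encoding handling in the nginx job.
    storage.Bucket(u"logs.fd.io").download_file(
        u"tmp/archive/report.html.gz", u"/tmp/report.html.gz"
    )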