Diffstat (limited to 'fdio.infra.terraform/1n_nmd/minio_s3_gateway')
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio_s3_gateway/conf/nomad/minio.hcl  246
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio_s3_gateway/main.tf                51
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio_s3_gateway/variables.tf          199
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio_s3_gateway/versions.tf            13
4 files changed, 0 insertions, 509 deletions
diff --git a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/conf/nomad/minio.hcl b/fdio.infra.terraform/1n_nmd/minio_s3_gateway/conf/nomad/minio.hcl
deleted file mode 100644
index 6210040b0c..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/conf/nomad/minio.hcl
+++ /dev/null
@@ -1,246 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "${region}"
-
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = ${max_parallel}
-
- health_check = "checks"
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
-%{ if use_canary }
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = ${canary}
-
- # Specifies if the job should auto-promote to the canary version when all
- # canaries become healthy during a deployment. Defaults to false which means
- # canaries must be manually updated with the nomad deployment promote
- # command.
- auto_promote = ${auto_promote}
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = ${auto_revert}
-%{ endif }
- }
-
- # All groups in this job should be scheduled on different hosts.
- constraint {
- operator = "distinct_hosts"
- value = "true"
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "${job_name}-group-1" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- # The volume stanza allows the group to specify that it requires a given
- # volume from the cluster. The key of the stanza is the name of the volume
- # as it will be exposed to task configuration.
- #
- # https://www.nomadproject.io/docs/job-specification/volume
- %{ if use_host_volume }
- volume "${job_name}-volume-1" {
- type = "host"
- read_only = false
- source = "${volume_source}"
- }
- %{ endif }
-
- # The restart stanza configures a task's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
- }
-
- # The network stanza specifies the networking requirements for the task
- # group, including the network mode and port allocations. When scheduling
- # jobs in Nomad they are provisioned across your fleet of machines along
- # with other jobs and services. Because you don't know in advance what host
- # your job will be provisioned on, Nomad will provide your tasks with
- # network configuration when they start up.
- #
- # https://www.nomadproject.io/docs/job-specification/network
- #
- network {
- port "base" {
- static = ${port_base}
- to = ${port_base}
- }
- port "console" {
- static = ${port_console}
- to = ${port_console}
- }
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch process.
- #
- # https://www.nomadproject.io/docs/job-specification/task.html
- #
- task "${job_name}-task-1" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "exec"
-
- %{ if use_host_volume }
- volume_mount {
- volume = "${job_name}-volume-1"
- destination = "${volume_destination}"
- read_only = false
- }
- %{ endif }
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- args = [
- "${mode}", "s3",
- "-address", ":${port_base}",
- "-console-address", ":${port_console}"
- ]
- command = "local/minio"
- }
-
- # The artifact stanza instructs Nomad to fetch and unpack a remote resource,
- # such as a file, tarball, or binary. Nomad downloads artifacts using the
- # popular go-getter library, which permits downloading artifacts from a
- # variety of locations using a URL as the input source.
- #
- # For more information and examples on the "artifact" stanza, please see
- # the online documentation at:
- #
- # https://www.nomadproject.io/docs/job-specification/artifact
- #
- artifact {
- source = "https://dl.min.io/server/minio/release/linux-amd64/minio"
- }
-
- # The env stanza configures a list of environment variables to populate
- # the task's environment before starting.
- env {
-%{ if use_vault_provider }
-{{ with secret "${vault_kv_path}" }}
- MINIO_ROOT_USER = "{{ .Data.data.${vault_kv_field_access_key} }}"
- MINIO_ROOT_PASSWORD = "{{ .Data.data.${vault_kv_field_secret_key} }}"
-{{ end }}
-%{ else }
- MINIO_ROOT_USER = "${access_key}"
- MINIO_ROOT_PASSWORD = "${secret_key}"
- AWS_ACCESS_KEY_ID = "${access_key}"
- AWS_SECRET_ACCESS_KEY = "${secret_key}"
-%{ endif }
- ${ envs }
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "${service_name}"
- port = "base"
- tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "Min.io Server HTTP Check Live"
- type = "http"
- port = "base"
- protocol = "http"
- method = "GET"
- path = "/minio/health/live"
- interval = "10s"
- timeout = "2s"
- }
- check {
- name = "Min.io Server HTTP Check Ready"
- type = "http"
- port = "base"
- protocol = "http"
- method = "GET"
- path = "/minio/health/ready"
- interval = "10s"
- timeout = "2s"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
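
Note: the jobspec above is a Terraform template, so every ${...} placeholder and %{ if } directive is resolved by Terraform before the job ever reaches Nomad. As a minimal sketch (abbreviated, not taken from the repository), rendering it with the module defaults declared in variables.tf below (job_name = "minio", max_parallel = 1, group_count = 1, port_base = 9000, port_console = 9001, mode = "server", all use_* flags false) would produce roughly:

    job "minio" {
      datacenters = "dc1"   # join(",", var.datacenters)
      type        = "service"

      update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "3m"
        progress_deadline = "10m"
      }

      group "minio-group-1" {
        count = 1

        network {
          port "base" {
            static = 9000
            to     = 9000
          }
          port "console" {
            static = 9001
            to     = 9001
          }
        }

        task "minio-task-1" {
          driver = "exec"

          config {
            command = "local/minio"
            # "server s3" comes from the default mode = "server"; the S3
            # gateway deployment presumably passed mode = "gateway".
            args = ["server", "s3", "-address", ":9000", "-console-address", ":9001"]
          }

          env {
            MINIO_ROOT_USER       = "minio"
            MINIO_ROOT_PASSWORD   = "minio"
            AWS_ACCESS_KEY_ID     = "minio"
            AWS_SECRET_ACCESS_KEY = "minio"
          }

          # constraint, restart, artifact, service checks, and resources
          # render exactly as written above
        }
      }
    }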
diff --git a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/main.tf b/fdio.infra.terraform/1n_nmd/minio_s3_gateway/main.tf
deleted file mode 100644
index 2ae3cac9c2..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/main.tf
+++ /dev/null
@@ -1,51 +0,0 @@
-locals {
- datacenters = join(",", var.datacenters)
- envs = join("\n", concat([], var.envs))
- upstreams = jsonencode(var.upstreams)
-}
-
-data "template_file" "nomad_job_minio" {
- template = file("${path.module}/conf/nomad/minio.hcl")
- vars = {
- access_key = var.access_key
- auto_promote = var.auto_promote
- auto_revert = var.auto_revert
- canary = var.canary
- cpu = var.cpu
- cpu_proxy = var.resource_proxy.cpu
- datacenters = local.datacenters
- envs = local.envs
- group_count = var.group_count
- host = var.host
- image = var.image
- job_name = var.job_name
- max_parallel = var.max_parallel
- memory = var.memory
- memory_proxy = var.resource_proxy.memory
- mode = var.mode
- port_base = var.port_base
- port_console = var.port_console
- region = var.region
- secret_key = var.secret_key
- service_name = var.service_name
- use_canary = var.use_canary
- use_host_volume = var.use_host_volume
- upstreams = local.upstreams
- use_vault_kms = var.kms_variables.use_vault_kms
- use_vault_provider = var.vault_secret.use_vault_provider
- vault_address = var.kms_variables.vault_address
- vault_kms_approle_kv = var.kms_variables.vault_kms_approle_kv
- vault_kms_key_name = var.kms_variables.vault_kms_key_name
- vault_kv_policy_name = var.vault_secret.vault_kv_policy_name
- vault_kv_path = var.vault_secret.vault_kv_path
- vault_kv_field_access_key = var.vault_secret.vault_kv_field_access_key
- vault_kv_field_secret_key = var.vault_secret.vault_kv_field_secret_key
- volume_destination = var.volume_destination
- volume_source = var.volume_source
- }
-}
-
-resource "nomad_job" "nomad_job_minio" {
- jobspec = data.template_file.nomad_job_minio.rendered
- detach = false
-}
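
Side note: template_file comes from the hashicorp/template provider (pinned in versions.tf below), which has been archived upstream. Since Terraform 0.12, the built-in templatefile() function renders the same template without any extra provider. A sketch of the equivalent wiring, assuming the same variables (vars map abbreviated):

    resource "nomad_job" "nomad_job_minio" {
      jobspec = templatefile("${path.module}/conf/nomad/minio.hcl", {
        access_key   = var.access_key
        datacenters  = local.datacenters
        envs         = local.envs
        job_name     = var.job_name
        max_parallel = var.max_parallel
        # ... remaining keys exactly as in the template_file vars map above ...
      })
      detach = false
    }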
diff --git a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/variables.tf b/fdio.infra.terraform/1n_nmd/minio_s3_gateway/variables.tf
deleted file mode 100644
index 6fb351df26..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/variables.tf
+++ /dev/null
@@ -1,199 +0,0 @@
-# Nomad
-
-variable "datacenters" {
- description = "Specifies the list of DCs to be considered placing this task"
- type = list(string)
- default = ["dc1"]
-}
-
-variable "region" {
- description = "Specifies the list of DCs to be considered placing this task"
- type = string
- default = "global"
-}
-
-variable "volume_source" {
- description = "The name of the volume to request"
- type = string
- default = "persistence"
-}
-
-# Minio
-variable "access_key" {
- description = "Minio access key"
- type = string
- default = "minio"
-}
-
-variable "auto_promote" {
- description = "Specifies if the job should auto-promote to the canary version"
- type = bool
- default = true
-}
-
-variable "auto_revert" {
- description = "Specifies if the job should auto-revert to the last stable job"
- type = bool
- default = true
-}
-
-variable "canary" {
- description = "Equal to the count of the task group allows blue/green depl."
- type = number
- default = 1
-}
-
-variable "cpu" {
- description = "Specifies the CPU required to run this task in MHz"
- type = number
- default = 1000
-}
-
-variable "envs" {
- description = "Minio environment variables"
- type = list(string)
- default = []
-}
-
-variable "group_count" {
- description = "Specifies the number of the task groups running under this one"
- type = number
- default = 1
-}
-
-variable "host" {
- description = "Minio host"
- type = string
- default = "127.0.0.1"
-}
-
-variable "image" {
- description = "The Docker image to run"
- type = string
- default = "minio/minio:latest"
-}
-
-variable "job_name" {
- description = "Specifies a name for the job"
- type = string
- default = "minio"
-}
-
-variable "kms_variables" {
- type = object({
- use_vault_kms = string
- vault_address = string,
- vault_kms_approle_kv = string,
- vault_kms_key_name = string
- })
- description = "Set of properties to be able to transit secrets in vault"
- default = {
- use_vault_kms = false
- vault_address = "",
- vault_kms_approle_kv = "",
- vault_kms_key_name = ""
- }
-}
-
-variable "max_parallel" {
- description = "Specifies the maximum number of updates to perform in parallel"
- type = number
- default = 1
-}
-
-variable "memory" {
- description = "Specifies the memory required in MB"
- type = number
- default = 1024
-}
-
-variable "mode" {
- description = "Specifies the Minio mode"
- type = string
- default = "server"
-}
-
-variable "port_base" {
- description = "Specifies the static TCP/UDP port to allocate"
- type = number
- default = 9000
-}
-
-variable "port_console" {
- description = "Specifies the static TCP/UDP port to allocate"
- type = number
- default = 9001
-}
-
-variable "resource_proxy" {
- description = "Minio proxy resources"
- type = object({
- cpu = number,
- memory = number
- })
- default = {
- cpu = 2000,
- memory = 1024
- }
- validation {
- condition = var.resource_proxy.cpu >= 200 && var.resource_proxy.memory >= 128
- error_message = "Proxy resource must be at least: cpu=200, memory=128."
- }
-}
-
-variable "service_name" {
- description = "Specifies the name this service will be advertised in Consul"
- type = string
- default = "minio"
-}
-
-variable "secret_key" {
- description = "Minio secret key"
- type = string
- default = "minio"
-}
-
-variable "upstreams" {
- type = list(object({
- service_name = string,
- port = number,
- }))
- description = "List of upstream services"
- default = []
-}
-
-variable "use_canary" {
- description = "Uses canary deployment for Minio"
- type = bool
- default = false
-}
-
-variable "use_host_volume" {
- description = "Use Nomad host volume feature"
- type = bool
- default = false
-}
-
-variable "vault_secret" {
- type = object({
- use_vault_provider = bool,
- vault_kv_policy_name = string,
- vault_kv_path = string,
- vault_kv_field_access_key = string,
- vault_kv_field_secret_key = string
- })
- description = "Set of properties to be able to fetch secret from vault"
- default = {
- use_vault_provider = false
- vault_kv_policy_name = "kv"
- vault_kv_path = "secret/data/minio"
- vault_kv_field_access_key = "access_key"
- vault_kv_field_secret_key = "secret_key"
- }
-}
-
-variable "volume_destination" {
- description = "Specifies where the volume should be mounted inside the task"
- type = string
- default = "/data/"
-}
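
Every variable above carries a default, so a caller only has to override what differs; note that the access_key/secret_key defaults ("minio"/"minio") are placeholders, and any real deployment should set use_vault_provider = true instead. A hypothetical module call (source path and values are illustrative, not taken from this repository):

    module "minio_s3_gateway" {
      source       = "./minio_s3_gateway"
      datacenters  = ["yul1"]
      job_name     = "prod-minio"
      service_name = "storage"
      mode         = "gateway"

      vault_secret = {
        use_vault_provider        = true
        vault_kv_policy_name      = "kv"
        vault_kv_path             = "secret/data/minio"
        vault_kv_field_access_key = "access_key"
        vault_kv_field_secret_key = "secret_key"
      }
    }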
diff --git a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/versions.tf b/fdio.infra.terraform/1n_nmd/minio_s3_gateway/versions.tf
deleted file mode 100644
index b80610a525..0000000000
--- a/fdio.infra.terraform/1n_nmd/minio_s3_gateway/versions.tf
+++ /dev/null
@@ -1,13 +0,0 @@
-terraform {
- required_providers {
- nomad = {
- source = "hashicorp/nomad"
- version = "~> 1.4.15"
- }
- template = {
- source = "hashicorp/template"
- version = "~> 2.2.0"
- }
- }
- required_version = ">= 1.0.3"
-}
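
Had the module been ported to templatefile() as sketched under main.tf above, the archived template provider pin could have been dropped entirely, leaving a minimal versions.tf along these lines (a sketch, not from the repository):

    terraform {
      required_providers {
        nomad = {
          source  = "hashicorp/nomad"
          version = "~> 1.4.15"
        }
      }
      required_version = ">= 1.0.3"
    }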