author     pmikus <pmikus@cisco.com>        2021-03-07 08:57:13 +0000
committer  Peter Mikus <pmikus@cisco.com>   2021-03-11 07:07:03 +0000
commit     c318223fcd266c0ee2982e803c44e193c2023054 (patch)
tree       4559bf49a5ab183d0c3a75a883edf40e7802222f /fdio.infra.terraform/1n_nmd/minio
parent     221e2f4da7cb27954525d973d930cb8db4601c8f (diff)
Infra: Switch csit-shim to fdiotools
+ use /u/fdiotools
+ use ubuntu 20.04
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: I091e63a0d9e50de203b1527c7500b3864a616af6
Diffstat (limited to 'fdio.infra.terraform/1n_nmd/minio')

-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl      73
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl  223
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/main.tf                82
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/outputs.tf              4
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/variables.tf          170
-rw-r--r--  fdio.infra.terraform/1n_nmd/minio/versions.tf            13

6 files changed, 565 insertions, 0 deletions
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
new file mode 100644
index 0000000000..238003bb00
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/mc.hcl
@@ -0,0 +1,73 @@
+job "${job_name}" {
+  # The "region" parameter specifies the region in which to execute the job.
+  # If omitted, this inherits the default region name of "global".
+  # region = "global"
+  #
+  # The "datacenters" parameter specifies the list of datacenters which should
+  # be considered when placing this task. This must be provided.
+  datacenters = "${datacenters}"
+
+  # The "type" parameter controls the type of job, which impacts the scheduler's
+  # decision on placement. This configuration is optional and defaults to
+  # "service". For a full list of job types and their differences, please see
+  # the online documentation.
+  #
+  # For more information, please see the online documentation at:
+  #
+  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
+  #
+  type = "batch"
+
+  # The "group" stanza defines a series of tasks that should be co-located on
+  # the same Nomad client. Any task within a group will be placed on the same
+  # client.
+  #
+  # For more information and examples on the "group" stanza, please see
+  # the online documentation at:
+  #
+  #     https://www.nomadproject.io/docs/job-specification/group.html
+  #
+  group "prod-group1-mc" {
+    task "prod-task1-create-buckets" {
+      # The "driver" parameter specifies the task driver that should be used to
+      # run the task.
+      driver = "docker"
+
+      %{ if use_vault_provider }
+      vault {
+        policies = "${vault_kv_policy_name}"
+      }
+      %{ endif }
+
+      # The "config" stanza specifies the driver configuration, which is passed
+      # directly to the driver to start the task. The details of configurations
+      # are specific to each driver, so please see specific driver
+      # documentation for more information.
+      config {
+        image       = "${image}"
+        entrypoint  = [
+          "/bin/sh",
+          "-c",
+          "${command}"
+        ]
+        dns_servers = [ "$${attr.unique.network.ip-address}" ]
+        privileged  = false
+      }
+
+      # The env stanza configures a list of environment variables to populate
+      # the task's environment before starting.
+      env {
+        %{ if use_vault_provider }
+        {{ with secret "${vault_kv_path}" }}
+        MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
+        MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
+        {{ end }}
+        %{ else }
+        MINIO_ACCESS_KEY = "${access_key}"
+        MINIO_SECRET_KEY = "${secret_key}"
+        %{ endif }
+        ${ envs }
+      }
+    }
+  }
+}
diff --git a/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
new file mode 100644
index 0000000000..3889b51a9f
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/conf/nomad/minio.hcl
@@ -0,0 +1,223 @@
+job "${job_name}" {
+  # The "region" parameter specifies the region in which to execute the job.
+  # If omitted, this inherits the default region name of "global".
+  # region = "global"
+  #
+  # The "datacenters" parameter specifies the list of datacenters which should
+  # be considered when placing this task. This must be provided.
+  datacenters = "${datacenters}"
+
+  # The "type" parameter controls the type of job, which impacts the scheduler's
+  # decision on placement. This configuration is optional and defaults to
+  # "service". For a full list of job types and their differences, please see
+  # the online documentation.
+  #
+  #     https://www.nomadproject.io/docs/jobspec/schedulers
+  #
+  type = "service"
+
+  update {
+    # The "max_parallel" parameter specifies the maximum number of updates to
+    # perform in parallel. In this case, this specifies to update a single task
+    # at a time.
+    max_parallel = 1
+
+    health_check = "checks"
+
+    # The "min_healthy_time" parameter specifies the minimum time the allocation
+    # must be in the healthy state before it is marked as healthy and unblocks
+    # further allocations from being updated.
+    min_healthy_time = "10s"
+
+    # The "healthy_deadline" parameter specifies the deadline in which the
+    # allocation must be marked as healthy after which the allocation is
+    # automatically transitioned to unhealthy. Transitioning to unhealthy will
+    # fail the deployment and potentially roll back the job if "auto_revert" is
+    # set to true.
+    healthy_deadline = "3m"
+
+    # The "progress_deadline" parameter specifies the deadline in which an
+    # allocation must be marked as healthy. The deadline begins when the first
+    # allocation for the deployment is created and is reset whenever an allocation
+    # as part of the deployment transitions to a healthy state. If no allocation
+    # transitions to the healthy state before the progress deadline, the
+    # deployment is marked as failed.
+    progress_deadline = "10m"
+
+%{ if use_canary }
+    # The "canary" parameter specifies that changes to the job that would result
+    # in destructive updates should create the specified number of canaries
+    # without stopping any previous allocations. Once the operator determines the
+    # canaries are healthy, they can be promoted which unblocks a rolling update
+    # of the remaining allocations at a rate of "max_parallel".
+    #
+    # Further, setting "canary" equal to the count of the task group allows
+    # blue/green deployments. When the job is updated, a full set of the new
+    # version is deployed and upon promotion the old version is stopped.
+    canary = 1
+
+    # Specifies if the job should auto-promote to the canary version when all
+    # canaries become healthy during a deployment. Defaults to false which means
+    # canaries must be manually updated with the nomad deployment promote
+    # command.
+    auto_promote = true
+
+    # The "auto_revert" parameter specifies if the job should auto-revert to the
+    # last stable job on deployment failure. A job is marked as stable if all the
+    # allocations as part of its deployment were marked healthy.
+    auto_revert = true
+%{ endif }
+  }
+
+  # All groups in this job should be scheduled on different hosts.
+  constraint {
+    operator = "distinct_hosts"
+    value    = "true"
+  }
+
+  # The "group" stanza defines a series of tasks that should be co-located on
+  # the same Nomad client. Any task within a group will be placed on the same
+  # client.
+  #
+  #     https://www.nomadproject.io/docs/job-specification/group
+  #
+  group "prod-group1-minio" {
+    # The "count" parameter specifies the number of the task groups that should
+    # be running under this group. This value must be non-negative and defaults
+    # to 1.
+    count = ${group_count}
+
+    # https://www.nomadproject.io/docs/job-specification/volume
+    %{ if use_host_volume }
+    volume "prod-volume1-minio" {
+      type      = "host"
+      read_only = false
+      source    = "${host_volume}"
+    }
+    %{ endif }
+
+    # The restart stanza configures a task's behavior on task failure. Restarts
+    # happen on the client that is running the task.
+    #
+    #     https://www.nomadproject.io/docs/job-specification/restart
+    #
+    restart {
+      interval = "30m"
+      attempts = 40
+      delay    = "15s"
+      mode     = "delay"
+    }
+
+    # The "task" stanza creates an individual unit of work, such as a Docker
+    # container, web application, or batch processing.
+    #
+    #     https://www.nomadproject.io/docs/job-specification/task.html
+    #
+    task "prod-task1-minio" {
+      # The "driver" parameter specifies the task driver that should be used to
+      # run the task.
+      driver = "docker"
+
+      %{ if use_host_volume }
+      volume_mount {
+        volume      = "prod-volume1-minio"
+        destination = "${data_dir}"
+        read_only   = false
+      }
+      %{ endif }
+
+      %{ if use_vault_provider }
+      vault {
+        policies = "${vault_kv_policy_name}"
+      }
+      %{ endif }
+
+      # The "config" stanza specifies the driver configuration, which is passed
+      # directly to the driver to start the task. The details of configurations
+      # are specific to each driver, so please see specific driver
+      # documentation for more information.
+      config {
+        image        = "${image}"
+        dns_servers  = [ "172.17.0.1" ]
+        network_mode = "host"
+        command      = "server"
+        args         = [ "${host}:${port}${data_dir}" ]
+        port_map {
+          http = ${port}
+        }
+        privileged = false
+      }
+
+      # The env stanza configures a list of environment variables to populate
+      # the task's environment before starting.
+      env {
+%{ if use_vault_provider }
+{{ with secret "${vault_kv_path}" }}
+        MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
+        MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
+{{ end }}
+%{ else }
+        MINIO_ACCESS_KEY = "${access_key}"
+        MINIO_SECRET_KEY = "${secret_key}"
+%{ endif }
+        ${ envs }
+      }
+
+      # The service stanza instructs Nomad to register a service with Consul.
+      #
+      #     https://www.nomadproject.io/docs/job-specification/service
+      #
+      service {
+        name = "${service_name}"
+        port = "http"
+        tags = [ "storage$${NOMAD_ALLOC_INDEX}" ]
+        check {
+          name     = "Min.io Server HTTP Check Live"
+          type     = "http"
+          port     = "http"
+          protocol = "http"
+          method   = "GET"
+          path     = "/minio/health/live"
+          interval = "10s"
+          timeout  = "2s"
+        }
+        check {
+          name     = "Min.io Server HTTP Check Ready"
+          type     = "http"
+          port     = "http"
+          protocol = "http"
+          method   = "GET"
+          path     = "/minio/health/ready"
+          interval = "10s"
+          timeout  = "2s"
+        }
+      }
+
+      # The "resources" stanza describes the requirements a task needs to
+      # execute. Resource requirements include memory, network, cpu, and more.
+      # This ensures the task will execute on a machine that contains enough
+      # resource capacity.
+      #
+      #     https://www.nomadproject.io/docs/job-specification/resources
+      #
+      resources {
+        cpu    = ${cpu}
+        memory = ${memory}
+        # The network stanza specifies the networking requirements for the task
+        # group, including the network mode and port allocations. When scheduling
+        # jobs in Nomad they are provisioned across your fleet of machines along
+        # with other jobs and services. Because you don't know in advance what host
+        # your job will be provisioned on, Nomad will provide your tasks with
+        # network configuration when they start up.
+        #
+        #     https://www.nomadproject.io/docs/job-specification/network
+        #
+        network {
+          port "http" {
+            static = ${port}
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/fdio.infra.terraform/1n_nmd/minio/main.tf b/fdio.infra.terraform/1n_nmd/minio/main.tf
new file mode 100644
index 0000000000..62d143f4b1
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/main.tf
@@ -0,0 +1,82 @@
+locals {
+  datacenters = join(",", var.nomad_datacenters)
+  minio_env_vars = join("\n",
+    concat([
+    ], var.minio_envs)
+  )
+  mc_env_vars = join("\n",
+    concat([
+    ], var.mc_envs)
+  )
+  mc_formatted_bucket_list = formatlist("LOCALMINIO/%s", var.minio_buckets)
+  mc_add_config_command = concat(
+    [
+      "mc",
+      "config",
+      "host",
+      "add",
+      "LOCALMINIO",
+      "http://${var.minio_service_name}.service.consul:${var.minio_port}",
+      "$MINIO_ACCESS_KEY",
+      "$MINIO_SECRET_KEY",
+    ])
+  mc_create_bucket_command = concat(["mc", "mb", "-p"], local.mc_formatted_bucket_list)
+  command = join(" ", concat(local.mc_add_config_command, ["&&"], local.mc_create_bucket_command, [";"], concat(var.mc_extra_commands)))
+}
+
+data "template_file" "nomad_job_minio" {
+  template = file("${path.module}/conf/nomad/minio.hcl")
+  vars = {
+    job_name           = var.minio_job_name
+    datacenters        = local.datacenters
+    use_canary         = var.minio_use_canary
+    group_count        = var.minio_group_count
+    use_host_volume    = var.minio_use_host_volume
+    host_volume        = var.nomad_host_volume
+    service_name       = var.minio_service_name
+    host               = var.minio_host
+    port               = var.minio_port
+    upstreams          = jsonencode(var.minio_upstreams)
+    cpu_proxy          = var.minio_resource_proxy.cpu
+    memory_proxy       = var.minio_resource_proxy.memory
+    use_vault_provider = var.minio_vault_secret.use_vault_provider
+    image              = var.minio_container_image
+    access_key         = var.minio_access_key
+    secret_key         = var.minio_secret_key
+    data_dir           = var.minio_data_dir
+    envs               = local.minio_env_vars
+    cpu                = var.minio_cpu
+    memory             = var.minio_memory
+  }
+}
+
+data "template_file" "nomad_job_mc" {
+  template = file("${path.module}/conf/nomad/mc.hcl")
+  vars = {
+    job_name           = var.mc_job_name
+    service_name       = var.mc_service_name
+    datacenters        = local.datacenters
+    minio_service_name = var.minio_service_name
+    minio_port         = var.minio_port
+    image              = var.mc_container_image
+    access_key         = var.minio_access_key
+    secret_key         = var.minio_secret_key
+    use_vault_provider = var.minio_vault_secret.use_vault_provider
+    envs               = local.mc_env_vars
+    command            = local.command
+  }
+}
+
+resource "nomad_job" "nomad_job_minio" {
+  jobspec = data.template_file.nomad_job_minio.rendered
+  detach  = false
+}
+
+#resource "nomad_job" "nomad_job_mc" {
+#  jobspec = data.template_file.nomad_job_mc.rendered
+#  detach  = false
+#
+#  depends_on = [
+#    nomad_job.nomad_job_minio
+#  ]
+#}
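Note how the "command" local above is the string that ends up in the mc task's "/bin/sh -c" entrypoint: the "mc config host add" call is joined with "&&" to the "mc mb -p" call, followed by ";" and any extra commands. As a sketch only, with hypothetical minio_buckets = ["logs", "docs"] and the default service name and port, it would render to roughly:

    mc config host add LOCALMINIO http://minio.service.consul:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY && mc mb -p LOCALMINIO/logs LOCALMINIO/docs ;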
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/outputs.tf b/fdio.infra.terraform/1n_nmd/minio/outputs.tf
new file mode 100644
index 0000000000..309cd3b9d0
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/outputs.tf
@@ -0,0 +1,4 @@
+output "minio_service_name" {
+  description = "Minio service name"
+  value       = data.template_file.nomad_job_minio.vars.service_name
+}
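A parent configuration can consume this output through the module reference; a minimal sketch, assuming the module is instantiated under the name "minio" (the output name "storage_service_name" is illustrative, not part of this change):

    output "storage_service_name" {
      value = module.minio.minio_service_name
    }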
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/variables.tf b/fdio.infra.terraform/1n_nmd/minio/variables.tf
new file mode 100644
index 0000000000..dbac3465ee
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/variables.tf
@@ -0,0 +1,170 @@
+# Nomad
+variable "nomad_datacenters" {
+  description = "Nomad data centers"
+  type        = list(string)
+  default     = [ "dc1" ]
+}
+
+variable "nomad_host_volume" {
+  description = "Nomad Host Volume"
+  type        = string
+  default     = "persistence"
+}
+
+# Minio
+variable "minio_job_name" {
+  description = "Minio job name"
+  type        = string
+  default     = "minio"
+}
+
+variable "minio_service_name" {
+  description = "Minio service name"
+  type        = string
+  default     = "minio"
+}
+
+variable "minio_group_count" {
+  description = "Number of Minio group instances"
+  type        = number
+  default     = 1
+}
+
+variable "minio_host" {
+  description = "Minio host"
+  type        = string
+  default     = "127.0.0.1"
+}
+
+variable "minio_port" {
+  description = "Minio port"
+  type        = number
+  default     = 9000
+}
+
+variable "minio_cpu" {
+  description = "CPU allocation for Minio"
+  type        = number
+  default     = 40000
+}
+
+variable "minio_memory" {
+  description = "Memory allocation for Minio"
+  type        = number
+  default     = 40000
+}
+
+variable "minio_container_image" {
+  description = "Minio Docker image"
+  type        = string
+  default     = "minio/minio:latest"
+}
+
+variable "minio_envs" {
+  description = "Minio environment variables"
+  type        = list(string)
+  default     = []
+}
+
+variable "minio_access_key" {
+  description = "Minio access key"
+  type        = string
+  default     = "minio"
+}
+
+variable "minio_secret_key" {
+  description = "Minio secret key"
+  type        = string
+  default     = "minio123"
+}
+
+variable "minio_data_dir" {
+  description = "Minio server data dir"
+  type        = string
+  default     = "/data/"
+}
+
+variable "minio_use_host_volume" {
+  description = "Use Nomad host volume feature"
+  type        = bool
+  default     = false
+}
+
+variable "minio_use_canary" {
+  description = "Uses canary deployment for Minio"
+  type        = bool
+  default     = false
+}
+
+variable "minio_vault_secret" {
+  description = "Set of properties needed to fetch a secret from Vault"
+  type = object({
+    use_vault_provider        = bool,
+    vault_kv_policy_name      = string,
+    vault_kv_path             = string,
+    vault_kv_field_access_key = string,
+    vault_kv_field_secret_key = string
+  })
+}
+
+variable "minio_resource_proxy" {
+  description = "Minio proxy resources"
+  type = object({
+    cpu    = number,
+    memory = number
+  })
+  default = {
+    cpu    = 200,
+    memory = 128
+  }
+  validation {
+    condition     = var.minio_resource_proxy.cpu >= 200 && var.minio_resource_proxy.memory >= 128
+    error_message = "Proxy resource must be at least: cpu=200, memory=128."
+  }
+}
+
+# MC
+variable "mc_job_name" {
+  description = "Minio client job name"
+  type        = string
+  default     = "mc"
+}
+
+variable "mc_service_name" {
+  description = "Minio client service name"
+  type        = string
+  default     = "mc"
+}
+
+variable "mc_container_image" {
+  description = "Minio client Docker image"
+  type        = string
+  default     = "minio/mc:latest"
+}
+
+variable "mc_envs" {
+  description = "Minio client environment variables"
+  type        = list(string)
+  default     = []
+}
+
+variable "minio_buckets" {
+  description = "List of buckets to create on startup"
+  type        = list(string)
+  default     = []
+}
+
+variable "minio_upstreams" {
+  description = "List of upstream services (list of objects with service_name, port)"
+  type = list(object({
+    service_name = string,
+    port         = number,
+  }))
+  default = []
+}
+
+variable "mc_extra_commands" {
+  description = "Extra commands to run in MC container after creating buckets"
+  type        = list(string)
+  default     = [""]
+}
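Because "minio_vault_secret" declares no default, every caller must supply it even when Vault is not in use. A minimal invocation sketch (module path and bucket name are illustrative, not part of this change):

    module "minio" {
      source        = "./minio"
      minio_buckets = [ "logs" ]
      minio_vault_secret = {
        use_vault_provider        = false,
        vault_kv_policy_name      = "",
        vault_kv_path             = "",
        vault_kv_field_access_key = "",
        vault_kv_field_secret_key = ""
      }
    }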
\ No newline at end of file
diff --git a/fdio.infra.terraform/1n_nmd/minio/versions.tf b/fdio.infra.terraform/1n_nmd/minio/versions.tf
new file mode 100644
index 0000000000..960bd4bba6
--- /dev/null
+++ b/fdio.infra.terraform/1n_nmd/minio/versions.tf
@@ -0,0 +1,13 @@
+terraform {
+  required_providers {
+    nomad = {
+      source  = "hashicorp/nomad"
+      version = "~> 1.4.9"
+    }
+    template = {
+      source  = "hashicorp/template"
+      version = "~> 2.1.2"
+    }
+  }
+  required_version = ">= 0.13"
+}
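The "template" provider pinned here has since been archived by HashiCorp; on the Terraform versions this module requires (>= 0.13), the same rendering can be done with the built-in templatefile() function instead of a data source. A sketch of the equivalent, not part of this change:

    resource "nomad_job" "nomad_job_minio" {
      jobspec = templatefile("${path.module}/conf/nomad/minio.hcl", {
        job_name    = var.minio_job_name
        datacenters = local.datacenters
        # ... remaining vars exactly as in data.template_file.nomad_job_minio
      })
      detach = false
    }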