author    Peter Mikus <pmikus@cisco.com>    2022-08-22 10:30:43 +0000
committer Peter Mikus <pmikus@cisco.com>    2022-08-22 10:30:43 +0000
commit    50553186120f97a4ac418e2226dbdf1ace436df1 (patch)
tree      95d919e15b7ea371d44e4f9f2a0255e48f64e4c6 /fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl
parent    f957fc746170b89e3fd3914d7a439e6b53164b50 (diff)
fix(terraform): Cleanup
Signed-off-by: Peter Mikus <pmikus@cisco.com>
Change-Id: Ib3ea836c0c3c3ebc564745933c441c46c6b8467d
Diffstat (limited to 'fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl')
-rw-r--r--  fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl  261
1 file changed, 0 insertions(+), 261 deletions(-)
diff --git a/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl b/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl
deleted file mode 100644
index 7b38437566..0000000000
--- a/fdio.infra.terraform/terraform-nomad-loki/conf/nomad/loki.hcl.tftpl
+++ /dev/null
@@ -1,261 +0,0 @@
-job "${job_name}" {
- # The "region" parameter specifies the region in which to execute the job.
- # If omitted, this inherits the default region name of "global".
- # region = "${region}"
-
- # The "datacenters" parameter specifies the list of datacenters which should
- # be considered when placing this task. This must be provided.
- datacenters = "${datacenters}"
-
- # The "type" parameter controls the type of job, which impacts the scheduler's
- # decision on placement. This configuration is optional and defaults to
- # "service". For a full list of job types and their differences, please see
- # the online documentation.
- #
- # https://www.nomadproject.io/docs/jobspec/schedulers
- #
- type = "service"
-
- update {
- # The "max_parallel" parameter specifies the maximum number of updates to
- # perform in parallel. In this case, this specifies to update a single task
- # at a time.
- max_parallel = ${max_parallel}
-
- health_check = "checks"
-
- # The "min_healthy_time" parameter specifies the minimum time the allocation
- # must be in the healthy state before it is marked as healthy and unblocks
- # further allocations from being updated.
- min_healthy_time = "10s"
-
- # The "healthy_deadline" parameter specifies the deadline in which the
- # allocation must be marked as healthy after which the allocation is
- # automatically transitioned to unhealthy. Transitioning to unhealthy will
- # fail the deployment and potentially roll back the job if "auto_revert" is
- # set to true.
- healthy_deadline = "3m"
-
- # The "progress_deadline" parameter specifies the deadline in which an
- # allocation must be marked as healthy. The deadline begins when the first
- # allocation for the deployment is created and is reset whenever an allocation
- # as part of the deployment transitions to a healthy state. If no allocation
- # transitions to the healthy state before the progress deadline, the
- # deployment is marked as failed.
- progress_deadline = "10m"
-
-%{ if use_canary }
- # The "canary" parameter specifies that changes to the job that would result
- # in destructive updates should create the specified number of canaries
- # without stopping any previous allocations. Once the operator determines the
- # canaries are healthy, they can be promoted which unblocks a rolling update
- # of the remaining allocations at a rate of "max_parallel".
- #
- # Further, setting "canary" equal to the count of the task group allows
- # blue/green deployments. When the job is updated, a full set of the new
- # version is deployed and upon promotion the old version is stopped.
- canary = ${canary}
-
- # Specifies if the job should auto-promote to the canary version when all
- # canaries become healthy during a deployment. Defaults to false which means
- # canaries must be manually updated with the nomad deployment promote
- # command.
- auto_promote = ${auto_promote}
-
- # The "auto_revert" parameter specifies if the job should auto-revert to the
- # last stable job on deployment failure. A job is marked as stable if all the
- # allocations as part of its deployment were marked healthy.
- auto_revert = ${auto_revert}
-%{ endif }
- }
-
- # The "group" stanza defines a series of tasks that should be co-located on
- # the same Nomad client. Any task within a group will be placed on the same
- # client.
- #
- # https://www.nomadproject.io/docs/job-specification/group
- #
- group "${job_name}-group-1" {
- # The "count" parameter specifies the number of the task groups that should
- # be running under this group. This value must be non-negative and defaults
- # to 1.
- count = ${group_count}
-
- # The volume stanza allows the group to specify that it requires a given
- # volume from the cluster. The key of the stanza is the name of the volume
- # as it will be exposed to task configuration.
- #
- # https://www.nomadproject.io/docs/job-specification/volume
- %{ if use_host_volume }
- volume "${job_name}-volume-1" {
- type = "host"
- read_only = false
- source = "${volume_source}"
- }
- %{ endif }
-
- # The restart stanza configures a task's behavior on task failure. Restarts
- # happen on the client that is running the task.
- #
- # https://www.nomadproject.io/docs/job-specification/restart
- #
- restart {
- interval = "30m"
- attempts = 40
- delay = "15s"
- mode = "delay"
- }
-
- # The constraint allows restricting the set of eligible nodes. Constraints
- # may filter on attributes or client metadata.
- #
- # https://www.nomadproject.io/docs/job-specification/constraint
- #
- constraint {
- attribute = "$${attr.cpu.arch}"
- operator = "!="
- value = "arm64"
- }
-
- constraint {
- attribute = "$${node.class}"
- value = "builder"
- }
-
- # The network stanza specifies the networking requirements for the task
- # group, including the network mode and port allocations. When scheduling
- # jobs in Nomad they are provisioned across your fleet of machines along
- # with other jobs and services. Because you don't know in advance what host
- # your job will be provisioned on, Nomad will provide your tasks with
- # network configuration when they start up.
- #
- # https://www.nomadproject.io/docs/job-specification/network
- #
- network {
- port "${service_name}" {
- static = ${port}
- to = ${port}
- }
- }
-
- # The "task" stanza creates an individual unit of work, such as a Docker
- # container, web application, or batch processing.
- #
- # https://www.nomadproject.io/docs/job-specification/task
- #
- task "${job_name}-task-1" {
- # The "driver" parameter specifies the task driver that should be used to
- # run the task.
- driver = "exec"
-
- %{ if use_host_volume }
- volume_mount {
- volume = "${job_name}-volume-1"
- destination = "${volume_destination}"
- read_only = false
- }
- %{ endif }
-
- %{ if use_vault_provider }
- vault {
- policies = "${vault_kv_policy_name}"
- }
- %{ endif }
-
- # The "config" stanza specifies the driver configuration, which is passed
- # directly to the driver to start the task. The details of configurations
- # are specific to each driver, so please see specific driver
- # documentation for more information.
- config {
- command = "local/loki-linux-amd64"
- }
-
- # The artifact stanza instructs Nomad to fetch and unpack a remote resource,
- # such as a file, tarball, or binary. Nomad downloads artifacts using the
- # popular go-getter library, which permits downloading artifacts from a
- # variety of locations using a URL as the input source.
- #
- # https://www.nomadproject.io/docs/job-specification/artifact
- #
- artifact {
- source = "${url}"
- args = [
- "-config.file secrets/config.yml"
- ]
- }
-
- template {
- change_mode = "noop"
- change_signal = "SIGINT"
- destination = "secrets/loki.yml"
- data = <<EOH
----
-auth_enabled: false
-
-server:
- http_listen_port: 3100
- http_listen_address: 127.0.0.1
-
-schema_config:
- configs:
- - from: 2020-05-15
- store: boltdb
- object_store: filesystem
- schema: v11
- index:
- prefix: index_
- period: 168h
-
-storage_config:
- boltdb:
- directory: /tmp/loki/index
-
- filesystem:
- directory: /tmp/loki/chunks
-
- aws:
- bucketnames: loki
- endpoint: http://storage.service.consul:9000
- access_key_id: storage
- secret_access_key: Storage1234
- insecure: false
- sse_encryption: false
- http_config:
- idle_conn_timeout: 90s
- response_header_timeout: 0s
- insecure_skip_verify: false
- s3forcepathstyle: true
-EOH
- }
-
- # The service stanza instructs Nomad to register a service with Consul.
- #
- # https://www.nomadproject.io/docs/job-specification/service
- #
- service {
- name = "${service_name}"
- port = "${service_name}"
- tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
- check {
- name = "Loki Check Live"
- type = "http"
- path = "/-/healthy"
- interval = "10s"
- timeout = "2s"
- }
- }
-
- # The "resources" stanza describes the requirements a task needs to
- # execute. Resource requirements include memory, network, cpu, and more.
- # This ensures the task will execute on a machine that contains enough
- # resource capacity.
- #
- # https://www.nomadproject.io/docs/job-specification/resources
- #
- resources {
- cpu = ${cpu}
- memory = ${memory}
- }
- }
- }
-}
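The placeholders in the removed file (${job_name}, %{ if use_canary }, and the escaped $${attr.cpu.arch} / $${NOMAD_ALLOC_INDEX}, which render to literal ${...} for Nomad to resolve at runtime) are Terraform template syntax. A consuming module would normally render this file with the built-in templatefile() function and submit the result through the Terraform Nomad provider. The sketch below is illustrative only: the resource name, values, and download URL are assumptions, not the configuration removed by this commit; only the variable names are taken from the template itself.

# Hypothetical sketch: rendering loki.hcl.tftpl with templatefile() and
# submitting the result via the Terraform Nomad provider.
# Variable names match the placeholders in the template above;
# all values are examples, not the removed module's configuration.
resource "nomad_job" "loki" {
  jobspec = templatefile("${path.module}/conf/nomad/loki.hcl.tftpl", {
    job_name             = "loki"
    region               = "global"   # referenced by the commented-out region line
    datacenters          = "yul1"     # inserted verbatim into the quoted datacenters argument
    max_parallel         = 1
    use_canary           = true
    canary               = 1
    auto_promote         = true
    auto_revert          = true
    group_count          = 1
    use_host_volume      = false
    volume_source        = ""
    volume_destination   = ""
    use_vault_provider   = false
    vault_kv_policy_name = ""
    service_name         = "loki"
    port                 = 3100
    url                  = "https://example.invalid/loki-linux-amd64.zip"  # placeholder artifact URL
    cpu                  = 2000
    memory               = 4096
  })
}

Because templatefile() fails if any referenced variable is missing, the map above lists every placeholder the template uses, including region, which appears only inside a commented-out line but is still interpolated during rendering.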