From fd4d85865e145f12330a4266be48fbdd6e919cf4 Mon Sep 17 00:00:00 2001
From: pmikus
Date: Wed, 9 Dec 2020 20:11:42 +0000
Subject: Refactor storage solution

+ Minio terraform module
+ XL mode enabled with erasure code
+ Upload script as a sample
+ Nginx terraform module
+ Updating ansible to reflect changes

Signed-off-by: pmikus
Change-Id: Ia8c439b749aa0de82bd6f1d0cfbecce4d7000a8f
---
 resources/tools/terraform/1n_nmd/.gitignore | 4 -
 resources/tools/terraform/1n_nmd/main.tf | 40 --
 .../terraform/1n_nmd/prod_storage/prod-nginx.nomad | 270 ----------
 .../1n_nmd/prod_storage/prod-storage.nomad | 256 ---------
 .../terraform/1n_nmd/prod_storage/resources.tf | 9 -
 .../1n_nmd/prod_vpp_device/prod_csit_shim.nomad | 171 ------
 .../terraform/1n_nmd/prod_vpp_device/resources.tf | 4 -
 resources/tools/terraform/1n_nmd/variables.tf | 5 -
 resources/tools/terraform/2n_aws_c5n/.gitignore | 4 -
 resources/tools/terraform/2n_aws_c5n/main.tf | 304 -----------
 resources/tools/terraform/2n_aws_c5n/nic.tf | 67 ---
 resources/tools/terraform/3n_aws_c5n/.gitignore | 4 -
 resources/tools/terraform/3n_aws_c5n/main.tf | 361 -------------
 resources/tools/terraform/3n_aws_c5n/nic.tf | 101 ----
 resources/tools/terraform/3n_azure_fsv2/.gitignore | 4 -
 resources/tools/terraform/3n_azure_fsv2/main.tf | 593 ---------------------
 resources/tools/terraform/3n_azure_fsv2/nic.tf | 133 -----
 .../lf_inventory/host_vars/10.32.8.14.yaml | 1 +
 .../lf_inventory/host_vars/10.32.8.15.yaml | 1 +
 .../lf_inventory/host_vars/10.32.8.16.yaml | 1 +
 .../lf_inventory/host_vars/10.32.8.17.yaml | 59 ++
 .../ansible/inventories/lf_inventory/hosts | 1 +
 22 files changed, 63 insertions(+), 2330 deletions(-)
 delete mode 100644 resources/tools/terraform/1n_nmd/.gitignore
 delete mode 100644 resources/tools/terraform/1n_nmd/main.tf
 delete mode 100644 resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
 delete mode 100644 resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad
 delete mode 100644 resources/tools/terraform/1n_nmd/prod_storage/resources.tf
 delete mode 100644 resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim.nomad
 delete mode 100644 resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
 delete mode 100644 resources/tools/terraform/1n_nmd/variables.tf
 delete mode 100644 resources/tools/terraform/2n_aws_c5n/.gitignore
 delete mode 100644 resources/tools/terraform/2n_aws_c5n/main.tf
 delete mode 100644 resources/tools/terraform/2n_aws_c5n/nic.tf
 delete mode 100644 resources/tools/terraform/3n_aws_c5n/.gitignore
 delete mode 100644 resources/tools/terraform/3n_aws_c5n/main.tf
 delete mode 100644 resources/tools/terraform/3n_aws_c5n/nic.tf
 delete mode 100644 resources/tools/terraform/3n_azure_fsv2/.gitignore
 delete mode 100644 resources/tools/terraform/3n_azure_fsv2/main.tf
 delete mode 100644 resources/tools/terraform/3n_azure_fsv2/nic.tf
 create mode 100644 resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.32.8.17.yaml

diff --git a/resources/tools/terraform/1n_nmd/.gitignore b/resources/tools/terraform/1n_nmd/.gitignore
deleted file mode 100644
index fc64f0039f..0000000000
--- a/resources/tools/terraform/1n_nmd/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.terraform/
-.terraform.tfstate.lock.info
-terraform.tfstate
-terraform.tfstate.backup
diff --git a/resources/tools/terraform/1n_nmd/main.tf b/resources/tools/terraform/1n_nmd/main.tf
deleted file mode 100644
index 330f647476..0000000000
--- a/resources/tools/terraform/1n_nmd/main.tf
+++ /dev/null
@@ -1,40 +0,0 @@
-terraform {
-  # This module is only tested with Terraform 0.13.5+.
-  required_version = ">= 0.13.5"
-}
-
-provider "nomad" {
-  address = var.nomad_provider_address
-  alias   = "yul1"
-}
-
-# For convenience in simple configurations, a child module automatically
-# inherits default (un-aliased) provider configurations from its parent.
-# This means that explicit provider blocks appear only in the root module,
-# and downstream modules can simply declare resources for that provider
-# and have them automatically associated with the root provider
-# configurations.
-
-# prod_storage
-#   + prod-group1-nginx
-#   + prod-group1-storage
-#   + services
-#     + docs.nginx.service.consul
-#     + logs.nginx.service.consul
-#     + storage.nginx.service.consul
-module "prod_storage" {
-  source = "./prod_storage"
-  providers = {
-    nomad = nomad.yul1
-  }
-}
-
-# prod_vpp_device
-#   + prod-csit-shim-amd
-#   + prod-csit-shim-arm
-module "prod_vpp_device" {
-  source = "./prod_vpp_device"
-  providers = {
-    nomad = nomad.yul1
-  }
-}
\ No newline at end of file
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
deleted file mode 100644
index 2af62a06c3..0000000000
--- a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
+++ /dev/null
@@ -1,270 +0,0 @@
-job "prod-nginx" {
-  # The "region" parameter specifies the region in which to execute the job.
-  # If omitted, this inherits the default region name of "global".
-  # region = "global"
-  #
-  # The "datacenters" parameter specifies the list of datacenters which should
-  # be considered when placing this task. This must be provided.
-  datacenters = [ "yul1" ]
-
-  # The "type" parameter controls the type of job, which impacts the scheduler's
-  # decision on placement. This configuration is optional and defaults to
-  # "service". For a full list of job types and their differences, please see
-  # the online documentation.
-  #
-  # For more information, please see the online documentation at:
-  #
-  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
-  #
-  type = "service"
-
-  update {
-    # The "max_parallel" parameter specifies the maximum number of updates to
-    # perform in parallel. Here it is set to 0, which disables rolling
-    # deployments for this job.
-    max_parallel = 0
-
-    # The "min_healthy_time" parameter specifies the minimum time the allocation
-    # must be in the healthy state before it is marked as healthy and unblocks
-    # further allocations from being updated.
-    min_healthy_time = "10s"
-
-    # The "healthy_deadline" parameter specifies the deadline in which the
-    # allocation must be marked as healthy after which the allocation is
-    # automatically transitioned to unhealthy. Transitioning to unhealthy will
-    # fail the deployment and potentially roll back the job if "auto_revert" is
-    # set to true.
-    healthy_deadline = "3m"
-
-    # The "progress_deadline" parameter specifies the deadline in which an
-    # allocation must be marked as healthy. The deadline begins when the first
-    # allocation for the deployment is created and is reset whenever an allocation
-    # as part of the deployment transitions to a healthy state. If no allocation
-    # transitions to the healthy state before the progress deadline, the
-    # deployment is marked as failed.
-    progress_deadline = "10m"
-
-    # The "auto_revert" parameter specifies if the job should auto-revert to the
-    # last stable job on deployment failure. A job is marked as stable if all the
-    # allocations as part of its deployment were marked healthy.
-    auto_revert = false
-
-    # The "canary" parameter specifies that changes to the job that would result
-    # in destructive updates should create the specified number of canaries
-    # without stopping any previous allocations. Once the operator determines the
-    # canaries are healthy, they can be promoted which unblocks a rolling update
-    # of the remaining allocations at a rate of "max_parallel".
-    #
-    # Further, setting "canary" equal to the count of the task group allows
-    # blue/green deployments. When the job is updated, a full set of the new
-    # version is deployed and upon promotion the old version is stopped.
-    canary = 0
-  }
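Note that max_parallel = 0 opts this job out of rolling deployments entirely. For contrast, a minimal sketch of the same stanza tuned for a conventional rolling update with one canary; the values below are illustrative and are not part of this change:

    update {
      max_parallel      = 1      # replace one allocation at a time
      min_healthy_time  = "10s"  # allocation must stay healthy this long
      healthy_deadline  = "3m"   # per-allocation health deadline
      progress_deadline = "10m"  # whole-deployment deadline
      auto_revert       = true   # roll back to the last stable version on failure
      canary            = 1      # stage one canary before promoting the update
    }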
-  # The reschedule stanza specifies the group's rescheduling strategy. If
-  # specified at the job level, the configuration will apply to all groups
-  # within the job. If the reschedule stanza is present on both the job and the
-  # group, they are merged, with the group stanza taking precedence over the
-  # job.
-  reschedule {
-    delay          = "30s"
-    delay_function = "constant"
-    unlimited      = true
-  }
-
-  # The "group" stanza defines a series of tasks that should be co-located on
-  # the same Nomad client. Any task within a group will be placed on the same
-  # client.
-  #
-  # For more information and examples on the "group" stanza, please see
-  # the online documentation at:
-  #
-  #     https://www.nomadproject.io/docs/job-specification/group.html
-  #
-  group "prod-group1-nginx" {
-    # The "count" parameter specifies the number of the task groups that should
-    # be running under this group. This value must be non-negative and defaults
-    # to 1.
-    count = 1
-
-    # The restart stanza configures a task's behavior on task failure. Restarts
-    # happen on the client that is running the task.
-    restart {
-      interval = "10m"
-      attempts = 2
-      delay    = "15s"
-      mode     = "fail"
-    }
-
-    # The "distinct_hosts" constraint is declared but disabled ("false"), so
-    # groups in this job may be co-located on the same host.
-    constraint {
-      operator = "distinct_hosts"
-      value    = "false"
-    }
-
-    # Prioritize one node.
-    affinity {
-      attribute = "${attr.unique.hostname}"
-      value     = "s46-nomad"
-      weight    = 100
-    }
-
-    # The volume stanza allows the group to specify that it requires a given
-    # volume from the cluster.
-    #
-    # For more information and examples on the "volume" stanza, please see
-    # the online documentation at:
-    #
-    #     https://www.nomadproject.io/docs/job-specification/volume
-    volume "prod-volume1-storage" {
-      type      = "host"
-      read_only = false
-      source    = "prod-volume-data1-1"
-    }
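A group-level "volume" stanza like the one above binds only to a host volume that the Nomad client already advertises. A minimal sketch of the client-side configuration that would back "prod-volume-data1-1", assuming a standard Nomad client HCL file; the file location and host path are illustrative, not taken from this change:

    # Nomad client configuration (e.g. /etc/nomad.d/client.hcl), sketch only.
    client {
      enabled = true

      # Advertise a host directory under the name the job references
      # via "source" in its group-level "volume" stanza.
      host_volume "prod-volume-data1-1" {
        path      = "/data"   # hypothetical host path
        read_only = false
      }
    }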
-    # The "task" stanza creates an individual unit of work, such as a Docker
-    # container, web application, or batch processing.
-    #
-    # For more information and examples on the "task" stanza, please see
-    # the online documentation at:
-    #
-    #     https://www.nomadproject.io/docs/job-specification/task.html
-    #
-    task "prod-task1-nginx" {
-      # The "driver" parameter specifies the task driver that should be used to
-      # run the task.
-      driver = "docker"
-
-      volume_mount {
-        volume      = "prod-volume1-storage"
-        destination = "/data/"
-        read_only   = true
-      }
-
-      # The "config" stanza specifies the driver configuration, which is passed
-      # directly to the driver to start the task. The details of configurations
-      # are specific to each driver, so please see specific driver
-      # documentation for more information.
-      config {
-        image       = "nginx:stable"
-        dns_servers = [ "${attr.unique.network.ip-address}" ]
-        port_map {
-          https = 443
-        }
-        privileged  = false
-        volumes     = [
-          "/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem",
-          "/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem",
-          "custom/logs.conf:/etc/nginx/conf.d/logs.conf",
-          "custom/docs.conf:/etc/nginx/conf.d/docs.conf"
-        ]
-      }
-
-      # The "template" stanza instructs Nomad to manage a template, such as
-      # a configuration file or script. This template can optionally pull data
-      # from Consul or Vault to populate runtime configuration data.
-      #
-      # For more information and examples on the "template" stanza, please see
-      # the online documentation at:
-      #
-      #     https://www.nomadproject.io/docs/job-specification/template.html
-      #
-      template {
-        data = <
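The heredoc carrying the template body is truncated in this excerpt ("data = <"). As a rough sketch of the shape such a template stanza takes, rendering an Nginx server block to the "custom/logs.conf" path that the volumes list above mounts into the container; the server block content is illustrative, not recovered from the original file:

    template {
      # Rendered into the task directory, then bind-mounted to
      # /etc/nginx/conf.d/logs.conf by the "volumes" mapping above.
      destination = "custom/logs.conf"
      data        = <<-EOH
        server {
          listen              443 ssl;
          server_name         logs.nginx.service.consul;  # from the service list in main.tf
          ssl_certificate     /etc/ssl/certs/nginx-cert.pem;
          ssl_certificate_key /etc/ssl/private/nginx-key.pem;
          root                /data;                      # the mounted prod volume
        }
      EOH
    }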