aboutsummaryrefslogtreecommitdiffstats
path: root/fdio.infra.terraform/2n_aws_c5n
diff options
context:
space:
mode:
authorpmikus <pmikus@cisco.com>2021-03-07 08:57:13 +0000
committerPeter Mikus <pmikus@cisco.com>2021-03-11 07:07:03 +0000
commitc318223fcd266c0ee2982e803c44e193c2023054 (patch)
tree4559bf49a5ab183d0c3a75a883edf40e7802222f /fdio.infra.terraform/2n_aws_c5n
parent221e2f4da7cb27954525d973d930cb8db4601c8f (diff)
Infra: Switch csit-shim to fdiotools
+ use /u/fdiotools + use ubuntu 20.04 Signed-off-by: pmikus <pmikus@cisco.com> Change-Id: I091e63a0d9e50de203b1527c7500b3864a616af6
Diffstat (limited to 'fdio.infra.terraform/2n_aws_c5n')
-rw-r--r--fdio.infra.terraform/2n_aws_c5n/.gitignore5
-rw-r--r--fdio.infra.terraform/2n_aws_c5n/deploy/main.tf390
-rw-r--r--fdio.infra.terraform/2n_aws_c5n/deploy/variables.tf143
-rw-r--r--fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf17
-rw-r--r--fdio.infra.terraform/2n_aws_c5n/main.tf53
5 files changed, 608 insertions, 0 deletions
diff --git a/fdio.infra.terraform/2n_aws_c5n/.gitignore b/fdio.infra.terraform/2n_aws_c5n/.gitignore
new file mode 100644
index 0000000000..40f77d862f
--- /dev/null
+++ b/fdio.infra.terraform/2n_aws_c5n/.gitignore
@@ -0,0 +1,5 @@
+.terraform/
+.terraform.tfstate.lock.info
+.terraform.lock.hcl
+terraform.tfstate
+terraform.tfstate.backup
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
new file mode 100644
index 0000000000..f97d974b38
--- /dev/null
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
@@ -0,0 +1,390 @@
+# AWS provider; target region is selected via var.region.
+provider "aws" {
+ region = var.region
+}
+
+# Testbed VPC. Its primary CIDR is the management range; the data-plane
+# ranges are attached below via aws_vpc_ipv4_cidr_block_association.
+resource "aws_vpc" "CSITVPC" {
+ cidr_block = var.vpc_cidr_mgmt
+
+ tags = {
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc"
+ "Environment" = var.environment_name
+ }
+}
+
+# Security group shared by all testbed instances and interfaces.
+# NOTE(review): SSH is open to 0.0.0.0/0 for provisioning convenience;
+# consider narrowing the source range if these testbeds are long-lived.
+resource "aws_security_group" "CSITSG" {
+ name = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+ description = "Allow inbound traffic"
+ vpc_id = aws_vpc.CSITVPC.id
+ depends_on = [aws_vpc.CSITVPC]
+
+ # SSH for remote-exec and Ansible provisioning.
+ ingress {
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ # All traffic between members of this security group.
+ # Protocol "-1" (all) quoted as a string, matching the egress rule
+ # below and the aws_security_group documentation.
+ ingress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ self = true
+ }
+
+ # Unrestricted egress.
+ egress {
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
+ tags = {
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+ "Environment" = var.environment_name
+ }
+}
+
+# Attach the secondary data-plane CIDR ranges ("b", "c", "d") to the VPC;
+# the matching subnets below depend on these associations.
+resource "aws_vpc_ipv4_cidr_block_association" "b" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_b
+ depends_on = [aws_vpc.CSITVPC]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "c" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_c
+ depends_on = [aws_vpc.CSITVPC]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "d" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_d
+ depends_on = [aws_vpc.CSITVPC]
+}
+
+# Subnets
+# "mgmt" carries the instances' primary (management) interfaces; "b" and
+# "d" carry the data-plane interfaces attached below.
+# NOTE(review): subnet "c" has no consumer in this file — confirm it is
+# used elsewhere or intentionally reserved.
+resource "aws_subnet" "mgmt" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_mgmt
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC]
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+resource "aws_subnet" "b" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_b
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b]
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+resource "aws_subnet" "c" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_c
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c]
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+resource "aws_subnet" "d" {
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_d
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d]
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# Internet gateway; the default-route to it is added in aws_route.CSIT-igw.
+resource "aws_internet_gateway" "CSITGW" {
+ vpc_id = aws_vpc.CSITVPC.id
+ depends_on = [aws_vpc.CSITVPC]
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# SSH keypair
+# Temporary key for provisioning only
+# Generated in-state; the private key is used by the null_resource
+# provisioner connections below.
+resource "tls_private_key" "CSITTLS" {
+ algorithm = "RSA"
+ rsa_bits = 4096
+}
+resource "aws_key_pair" "CSITKP" {
+ key_name = "${var.resources_name_prefix}_${var.testbed_name}-key"
+ public_key = tls_private_key.CSITTLS.public_key_openssh
+}
+
+# Cluster placement group so TG and DUT1 are co-located for low latency.
+resource "aws_placement_group" "CSITPG" {
+ name = "${var.resources_name_prefix}_${var.testbed_name}-pg"
+ strategy = "cluster"
+}
+
+# NICs
+# DUT1 first data-plane interface (subnet "b"), attached as device 1.
+# NOTE(review): both private_ip and private_ips are set to the same
+# address; the documented aws_network_interface argument is private_ips —
+# confirm the pinned provider accepts private_ip or drop it.
+resource "aws_network_interface" "dut1_if1" {
+ subnet_id = aws_subnet.b.id
+ source_dest_check = false
+ private_ip = var.dut1_if1_ip
+ private_ips = [var.dut1_if1_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.b]
+
+ attachment {
+ instance = aws_instance.dut1.id
+ device_index = 1
+ }
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# DUT1 second data-plane interface (subnet "d"), attached as device 2.
+# depends_on now includes aws_subnet.d, consistent with the sibling
+# interfaces (dut1_if1, tg_if1, tg_if2) which all list their subnet.
+resource "aws_network_interface" "dut1_if2" {
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.dut1_if2_ip
+ private_ips = [var.dut1_if2_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.d]
+
+ attachment {
+ instance = aws_instance.dut1.id
+ device_index = 2
+ }
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# TG first data-plane interface (subnet "b"), attached as device 1.
+resource "aws_network_interface" "tg_if1" {
+ subnet_id = aws_subnet.b.id
+ source_dest_check = false
+ private_ip = var.tg_if1_ip
+ private_ips = [var.tg_if1_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.b]
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 1
+ }
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# TG second data-plane interface (subnet "d"), attached as device 2.
+resource "aws_network_interface" "tg_if2" {
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.tg_if2_ip
+ private_ips = [var.tg_if2_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.d]
+
+ attachment {
+ instance = aws_instance.tg.id
+ device_index = 2
+ }
+
+ tags = {
+ "Environment" = var.environment_name
+ }
+}
+
+# Data sources re-reading the interfaces after creation; the topology
+# playbook below consumes their mac_address attributes.
+data "aws_network_interface" "dut1_if1" {
+ id = aws_network_interface.dut1_if1.id
+}
+
+data "aws_network_interface" "dut1_if2" {
+ id = aws_network_interface.dut1_if2.id
+}
+
+data "aws_network_interface" "tg_if1" {
+ id = aws_network_interface.tg_if1.id
+}
+
+data "aws_network_interface" "tg_if2" {
+ id = aws_network_interface.tg_if2.id
+}
+
+# Instances
+# Traffic generator instance on the mgmt subnet; data-plane NICs are
+# attached separately via the aws_network_interface resources above.
+resource "aws_instance" "tg" {
+ depends_on = [aws_vpc.CSITVPC, aws_placement_group.CSITPG]
+ ami = var.ami_image
+ availability_zone = var.avail_zone
+ instance_type = var.instance_type
+ key_name = aws_key_pair.CSITKP.key_name
+ associate_public_ip_address = true
+ subnet_id = aws_subnet.mgmt.id
+ private_ip = var.tg_mgmt_ip
+ vpc_security_group_ids = [aws_security_group.CSITSG.id]
+ placement_group = aws_placement_group.CSITPG.id
+ source_dest_check = false
+ # host_id = "1"
+
+ root_block_device {
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg"
+ "Environment" = var.environment_name
+ }
+}
+
+# Device-under-test instance; mirrors the TG instance configuration
+# except for its management IP and Name tag.
+resource "aws_instance" "dut1" {
+ depends_on = [aws_vpc.CSITVPC, aws_placement_group.CSITPG]
+ ami = var.ami_image
+ availability_zone = var.avail_zone
+ instance_type = var.instance_type
+ key_name = aws_key_pair.CSITKP.key_name
+ associate_public_ip_address = true
+ subnet_id = aws_subnet.mgmt.id
+ private_ip = var.dut1_mgmt_ip
+ vpc_security_group_ids = [aws_security_group.CSITSG.id]
+ placement_group = aws_placement_group.CSITPG.id
+ source_dest_check = false
+ # host_id = "2"
+
+ root_block_device {
+ volume_size = 50
+ }
+
+ tags = {
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1"
+ "Environment" = var.environment_name
+ }
+}
+
+# Routes
+# Default route to the internet gateway for the VPC main route table.
+resource "aws_route" "CSIT-igw" {
+ route_table_id = aws_vpc.CSITVPC.main_route_table_id
+ gateway_id = aws_internet_gateway.CSITGW.id
+ destination_cidr_block = "0.0.0.0/0"
+ depends_on = [aws_vpc.CSITVPC, aws_internet_gateway.CSITGW]
+}
+
+# Routes steering the TREX dummy ranges to the TG's primary interface.
+# NOTE(review): both routes depend_on aws_instance.dut1 while targeting
+# the tg instance's NIC — confirm this asymmetry is intentional.
+resource "aws_route" "dummy-trex-port-0" {
+ route_table_id = aws_vpc.CSITVPC.main_route_table_id
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ destination_cidr_block = var.trex_dummy_cidr_port_0
+ depends_on = [aws_vpc.CSITVPC, aws_instance.dut1]
+}
+
+resource "aws_route" "dummy-trex-port-1" {
+ route_table_id = aws_vpc.CSITVPC.main_route_table_id
+ network_interface_id = aws_instance.tg.primary_network_interface_id
+ destination_cidr_block = var.trex_dummy_cidr_port_1
+ depends_on = [aws_vpc.CSITVPC, aws_instance.dut1]
+}
+
+# Deployment/Ansible
+# Provision the TG: run var.first_run_commands over SSH, apply the
+# Ansible site playbook against the "tg_aws" host group, then reboot
+# (best-effort; the SSH session drops, hence on_failure = continue).
+resource "null_resource" "deploy_tg" {
+ depends_on = [
+ aws_instance.tg,
+ aws_network_interface.tg_if1,
+ aws_network_interface.tg_if2
+ ]
+
+ # SSH using the ephemeral keypair generated above.
+ connection {
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = tls_private_key.CSITTLS.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = var.ansible_file_path
+ force_handlers = true
+ }
+ hosts = ["tg_aws"]
+ extra_vars = {
+ ansible_ssh_pass = var.ansible_provision_pwd
+ ansible_python_interpreter = var.ansible_python_executable
+ aws = true
+ }
+ }
+ }
+
+ provisioner "remote-exec" {
+ on_failure = continue
+ inline = ["sudo reboot"]
+ }
+}
+
+# Provision DUT1: same flow as deploy_tg but targeting the "sut_aws"
+# Ansible host group.
+resource "null_resource" "deploy_dut1" {
+ depends_on = [
+ aws_instance.dut1,
+ aws_network_interface.dut1_if1,
+ aws_network_interface.dut1_if2
+ ]
+
+ # SSH using the ephemeral keypair generated above.
+ connection {
+ user = "ubuntu"
+ host = aws_instance.dut1.public_ip
+ private_key = tls_private_key.CSITTLS.private_key_pem
+ }
+
+ provisioner "remote-exec" {
+ inline = var.first_run_commands
+ }
+
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = var.ansible_file_path
+ force_handlers = true
+ }
+ hosts = ["sut_aws"]
+ extra_vars = {
+ ansible_ssh_pass = var.ansible_provision_pwd
+ ansible_python_interpreter = var.ansible_python_executable
+ aws = true
+ }
+ }
+ }
+
+ provisioner "remote-exec" {
+ on_failure = continue
+ inline = ["sudo reboot"]
+ }
+}
+
+# Generate the CSIT topology file locally, feeding the playbook the
+# interface MACs (from the data sources above) and public IPs.
+resource "null_resource" "deploy_topology" {
+ depends_on = [ aws_instance.tg, aws_instance.dut1 ]
+
+ provisioner "ansible" {
+ plays {
+ playbook {
+ file_path = var.ansible_topology_path
+ }
+ hosts = ["local"]
+ extra_vars = {
+ ansible_python_interpreter = var.ansible_python_executable
+ testbed_name = var.testbed_name
+ cloud_topology = var.topology_name
+ tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+ tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+ dut1_if1_mac = data.aws_network_interface.dut1_if1.mac_address
+ dut1_if2_mac = data.aws_network_interface.dut1_if2.mac_address
+ tg_public_ip = aws_instance.tg.public_ip
+ dut1_public_ip = aws_instance.dut1.public_ip
+ public_ip_list = "${aws_instance.tg.public_ip},${aws_instance.dut1.public_ip}"
+ }
+ }
+ }
+}
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/variables.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/variables.tf
new file mode 100644
index 0000000000..d1ff1d6575
--- /dev/null
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/variables.tf
@@ -0,0 +1,143 @@
+# General deployment parameters (region, image, naming, Ansible paths).
+variable "region" {
+ description = "AWS Region"
+ type = string
+ default = "eu-central-1"
+}
+
+variable "ami_image" {
+ description = "AWS AMI image name"
+ type = string
+ default = "ami-0b418580298265d5c"
+}
+
+variable "testbed_name" {
+ description = "Testbed name"
+ type = string
+ default = "testbed1"
+}
+
+variable "topology_name" {
+ description = "Prefix used when creating a topology file"
+ type = string
+ default = "2n_aws_c5n"
+}
+
+variable "instance_type" {
+ description = "AWS instance type"
+ type = string
+ default = "c5n.4xlarge"
+}
+
+variable "avail_zone" {
+ description = "AWS availability zone"
+ type = string
+ default = "eu-central-1a"
+}
+
+variable "environment_name" {
+ description = "Environment name - used for Environment tag"
+ type = string
+ default = "CSIT-AWS"
+}
+
+variable "resources_name_prefix" {
+ description = "Resource prefix - used for Name tag"
+ type = string
+ default = "CSIT_2n_aws_c5n"
+}
+
+variable "first_run_commands" {
+ description = "Commands to run after deployment via remote-exec"
+ type = list(string)
+ default = [""]
+}
+
+variable "ansible_file_path" {
+ description = "Path to Ansible playbook"
+ type = string
+ default = "../../resources/tools/testbed-setup/ansible/site.yaml"
+}
+
+variable "ansible_python_executable" {
+ description = "Path to Python interpreter"
+ type = string
+ default = "/usr/bin/python3"
+}
+
+variable "ansible_topology_path" {
+ description = "Path to Ansible playbook which creates a topology file"
+ type = string
+ default = "../../resources/tools/testbed-setup/ansible/cloud_topology.yaml"
+}
+
+# NOTE(review): default credential committed in source; acceptable only
+# for ephemeral lab testbeds — override for anything longer-lived.
+variable "ansible_provision_pwd" {
+ description = "Password used for ansible provisioning (ansible_ssh_pass)"
+ type = string
+ default = "Csit1234"
+}
+
+# Base VPC CIDRs
+# "mgmt" is the VPC primary CIDR; "b"/"c"/"d" are attached as secondary
+# CIDR blocks and backed by same-named subnets in deploy/main.tf.
+variable "vpc_cidr_mgmt" {
+ description = "Management CIDR block"
+ type = string
+ default = "192.168.0.0/24"
+}
+variable "vpc_cidr_b" {
+ description = "CIDR block B"
+ type = string
+ default = "192.168.10.0/24"
+}
+variable "vpc_cidr_c" {
+ description = "CIDR block C"
+ type = string
+ default = "200.0.0.0/24"
+}
+variable "vpc_cidr_d" {
+ description = "CIDR block D"
+ type = string
+ default = "192.168.20.0/24"
+}
+
+# Trex Dummy CIDRs
+# Destination ranges routed to the TG's primary interface.
+variable "trex_dummy_cidr_port_0" {
+ description = "TREX dummy CIDR"
+ type = string
+ default = "10.0.0.0/24"
+}
+variable "trex_dummy_cidr_port_1" {
+ description = "TREX dummy CIDR"
+ type = string
+ default = "20.0.0.0/24"
+}
+
+# IPs
+# Static private addresses for the data-plane and management interfaces.
+variable "tg_if1_ip" {
+ description = "TG IP on interface 1"
+ type = string
+ default = "192.168.10.254"
+}
+variable "tg_if2_ip" {
+ description = "TG IP on interface 2"
+ type = string
+ default = "192.168.20.254"
+}
+variable "dut1_if1_ip" {
+ description = "DUT IP on interface 1"
+ type = string
+ default = "192.168.10.11"
+}
+# Description fixed: this is interface 2 (copy-paste said "interface 1").
+variable "dut1_if2_ip" {
+ description = "DUT IP on interface 2"
+ type = string
+ default = "192.168.20.11"
+}
+variable "tg_mgmt_ip" {
+ description = "TG management interface IP"
+ type = string
+ default = "192.168.0.10"
+}
+variable "dut1_mgmt_ip" {
+ description = "DUT management interface IP"
+ type = string
+ default = "192.168.0.11"
+}
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf
new file mode 100644
index 0000000000..8b7b8c5f32
--- /dev/null
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/versions.tf
@@ -0,0 +1,17 @@
+# Provider version pins for this deployment module.
+terraform {
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = "~> 3.28.0"
+ }
+ null = {
+ source = "hashicorp/null"
+ version = "~> 3.0.0"
+ }
+ tls = {
+ source = "hashicorp/tls"
+ version = "~> 3.0.0"
+ }
+ }
+ required_version = ">= 0.13"
+}
diff --git a/fdio.infra.terraform/2n_aws_c5n/main.tf b/fdio.infra.terraform/2n_aws_c5n/main.tf
new file mode 100644
index 0000000000..a73288d9e8
--- /dev/null
+++ b/fdio.infra.terraform/2n_aws_c5n/main.tf
@@ -0,0 +1,53 @@
+# Root wrapper: instantiates ./deploy with concrete values for the
+# 2n_aws_c5n testbed.
+module "deploy" {
+ source = "./deploy"
+
+ # TODO: Use ENV variable for testbed_name for dynamic deployment
+ testbed_name = "testbed1"
+ topology_name = "2n_aws_c5n"
+ environment_name = "CSIT-AWS"
+ resources_name_prefix = "CSIT_2n_aws_c5n"
+
+ # AWS general
+ region = "eu-central-1"
+ avail_zone = "eu-central-1a"
+ instance_type = "c5n.4xlarge"
+ # NOTE(review): the comment below says bionic 18.04, while the commit
+ # message mentions ubuntu 20.04 — confirm the AMI matches the intent.
+ ami_image = "ami-0b418580298265d5c"
+ # eu-central-1/bionic-18.04-amd64-hvm-ssd-20200112
+ # kernel 4.15.0-1057-aws (~4.15.0-74)
+
+ # AWS Network
+ vpc_cidr_mgmt = "192.168.0.0/24"
+ vpc_cidr_b = "192.168.10.0/24"
+ vpc_cidr_c = "200.0.0.0/24"
+ vpc_cidr_d = "192.168.20.0/24"
+
+ tg_mgmt_ip = "192.168.0.10"
+ dut1_mgmt_ip = "192.168.0.11"
+
+ tg_if1_ip = "192.168.10.254"
+ tg_if2_ip = "192.168.20.254"
+ dut1_if1_ip = "192.168.10.11"
+ dut1_if2_ip = "192.168.20.11"
+
+ trex_dummy_cidr_port_0 = "10.0.0.0/24"
+ trex_dummy_cidr_port_1 = "20.0.0.0/24"
+
+ # Ansible
+ ansible_python_executable = "/usr/bin/python3"
+ ansible_file_path = "../../resources/tools/testbed-setup/ansible/site.yaml"
+ ansible_topology_path = "../../resources/tools/testbed-setup/ansible/cloud_topology.yaml"
+ ansible_provision_pwd = "Csit1234"
+
+ # First run
+ # TODO: Remove the testuser creation when added to user_add ansible role
+ # NOTE(review): hard-coded Csit1234 credentials and passwordless sudo —
+ # acceptable only for ephemeral lab instances.
+ first_run_commands = [
+ "sudo sed -i 's/^PasswordAuthentication/#PasswordAuthentication/' /etc/ssh/sshd_config",
+ "sudo systemctl restart sshd",
+ "sudo useradd --create-home -s /bin/bash provisionuser",
+ "echo 'provisionuser:Csit1234' | sudo chpasswd",
+ "echo 'provisionuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers",
+ "sudo useradd --create-home -s /bin/bash testuser",
+ "echo 'testuser:Csit1234' | sudo chpasswd",
+ "echo 'testuser ALL = (ALL) NOPASSWD: ALL' | sudo tee -a /etc/sudoers"
+ ]
+}