path: root/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
author    pmikus <pmikus@cisco.com>        2021-09-06 12:19:11 +0000
committer Peter Mikus <pmikus@cisco.com>   2021-09-06 12:23:29 +0000
commit    16770f5d50d0e1f9f82901e19b106fc1b88c41b8 (patch)
tree      2c29245adc9014a0836be51fd86683b11aef0ab1 /fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
parent    1ff165dc63cc5b1a2bfab11fa6a234b42e3c99ce (diff)
Terraform: Cleanup
Signed-off-by: pmikus <pmikus@cisco.com>
Change-Id: Ifec4087b4522754cb05f5b0408c02dad4e9ada67
Diffstat (limited to 'fdio.infra.terraform/2n_aws_c5n/deploy/main.tf')
-rw-r--r--    fdio.infra.terraform/2n_aws_c5n/deploy/main.tf    264
1 file changed, 132 insertions(+), 132 deletions(-)
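
The diff below is a whitespace-only realignment: every removed line reappears with identical content, and only the spacing around "=" changes. For illustration (not part of the commit), the "+" lines follow the canonical HCL alignment that terraform fmt produces, with "=" signs lined up across consecutive arguments of a block; the blocks shown here are taken from the diff itself:

# Illustrative sketch only, assuming the cleanup matches the terraform fmt
# canonical style; the exact tooling used for the commit is not recorded.
data "vault_aws_access_credentials" "creds" {
  backend = "${var.vault-name}-path"
  role    = "${var.vault-name}-role"
}

resource "aws_key_pair" "CSITKP" {
  key_name   = "${var.resources_name_prefix}_${var.testbed_name}-key"
  public_key = tls_private_key.CSITTLS.public_key_openssh
}

A formatting-only change like this can be verified without editing any files by running terraform fmt -check against the deploy directory, which exits non-zero if a .tf file deviates from the canonical formatting.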
diff --git a/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
index 8ab8311220..9bac6a7b35 100644
--- a/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
+++ b/fdio.infra.terraform/2n_aws_c5n/deploy/main.tf
@@ -1,220 +1,220 @@
data "vault_aws_access_credentials" "creds" {
- backend = "${var.vault-name}-path"
- role = "${var.vault-name}-role"
+ backend = "${var.vault-name}-path"
+ role = "${var.vault-name}-role"
}
resource "aws_vpc" "CSITVPC" {
cidr_block = var.vpc_cidr_mgmt
- tags = {
- "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc"
- "Environment" = var.environment_name
+ tags = {
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-vpc"
+ "Environment" = var.environment_name
}
}
resource "aws_security_group" "CSITSG" {
- name = "${var.resources_name_prefix}_${var.testbed_name}-sg"
- description = "Allow inbound traffic"
- vpc_id = aws_vpc.CSITVPC.id
- depends_on = [aws_vpc.CSITVPC]
+ name = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+ description = "Allow inbound traffic"
+ vpc_id = aws_vpc.CSITVPC.id
+ depends_on = [aws_vpc.CSITVPC]
ingress {
- from_port = 22
- to_port = 22
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = ["0.0.0.0/0"]
}
ingress {
- from_port = 0
- to_port = 0
- protocol = -1
- self = true
+ from_port = 0
+ to_port = 0
+ protocol = -1
+ self = true
}
egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
}
tags = {
- "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg"
- "Environment" = var.environment_name
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-sg"
+ "Environment" = var.environment_name
}
}
resource "aws_vpc_ipv4_cidr_block_association" "b" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_b
- depends_on = [aws_vpc.CSITVPC]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_b
+ depends_on = [aws_vpc.CSITVPC]
}
resource "aws_vpc_ipv4_cidr_block_association" "c" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_c
- depends_on = [aws_vpc.CSITVPC]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_c
+ depends_on = [aws_vpc.CSITVPC]
}
resource "aws_vpc_ipv4_cidr_block_association" "d" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_d
- depends_on = [aws_vpc.CSITVPC]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_d
+ depends_on = [aws_vpc.CSITVPC]
}
# Subnets
resource "aws_subnet" "mgmt" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_mgmt
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSITVPC]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_mgmt
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC]
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_subnet" "b" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_b
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_b
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.b]
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_subnet" "c" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_c
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_c
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.c]
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_subnet" "d" {
- vpc_id = aws_vpc.CSITVPC.id
- cidr_block = var.vpc_cidr_d
- availability_zone = var.avail_zone
- depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d]
+ vpc_id = aws_vpc.CSITVPC.id
+ cidr_block = var.vpc_cidr_d
+ availability_zone = var.avail_zone
+ depends_on = [aws_vpc.CSITVPC, aws_vpc_ipv4_cidr_block_association.d]
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_internet_gateway" "CSITGW" {
- vpc_id = aws_vpc.CSITVPC.id
- depends_on = [aws_vpc.CSITVPC]
+ vpc_id = aws_vpc.CSITVPC.id
+ depends_on = [aws_vpc.CSITVPC]
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
# SSH keypair
# Temporary key for provisioning only
resource "tls_private_key" "CSITTLS" {
- algorithm = "RSA"
- rsa_bits = 4096
+ algorithm = "RSA"
+ rsa_bits = 4096
}
resource "aws_key_pair" "CSITKP" {
- key_name = "${var.resources_name_prefix}_${var.testbed_name}-key"
- public_key = tls_private_key.CSITTLS.public_key_openssh
+ key_name = "${var.resources_name_prefix}_${var.testbed_name}-key"
+ public_key = tls_private_key.CSITTLS.public_key_openssh
}
resource "aws_placement_group" "CSITPG" {
- name = "${var.resources_name_prefix}_${var.testbed_name}-pg"
- strategy = "cluster"
+ name = "${var.resources_name_prefix}_${var.testbed_name}-pg"
+ strategy = "cluster"
}
# NICs
resource "aws_network_interface" "dut1_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.dut1_if1_ip
- private_ips = [var.dut1_if1_ip]
- security_groups = [aws_security_group.CSITSG.id]
- depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1]
+ subnet_id = aws_subnet.b.id
+ source_dest_check = false
+ private_ip = var.dut1_if1_ip
+ private_ips = [var.dut1_if1_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.dut1]
attachment {
- instance = aws_instance.dut1.id
- device_index = 1
+ instance = aws_instance.dut1.id
+ device_index = 1
}
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_network_interface" "dut1_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.dut1_if2_ip
- private_ips = [var.dut1_if2_ip]
- security_groups = [aws_security_group.CSITSG.id]
- depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut1]
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.dut1_if2_ip
+ private_ips = [var.dut1_if2_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.dut1]
attachment {
- instance = aws_instance.dut1.id
- device_index = 2
+ instance = aws_instance.dut1.id
+ device_index = 2
}
tags = {
- "Environment" = var.environment_name
+ "Environment" = var.environment_name
}
}
resource "aws_network_interface" "tg_if1" {
- subnet_id = aws_subnet.b.id
- source_dest_check = false
- private_ip = var.tg_if1_ip
- private_ips = [var.tg_if1_ip]
- security_groups = [aws_security_group.CSITSG.id]
- depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg]
+ subnet_id = aws_subnet.b.id
+ source_dest_check = false
+ private_ip = var.tg_if1_ip
+ private_ips = [var.tg_if1_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.b, aws_instance.tg]
attachment {
- instance = aws_instance.tg.id
- device_index = 1
+ instance = aws_instance.tg.id
+ device_index = 1
}
- tags = {
- "Environment" = var.environment_name
+ tags = {
+ "Environment" = var.environment_name
}
}
resource "aws_network_interface" "tg_if2" {
- subnet_id = aws_subnet.d.id
- source_dest_check = false
- private_ip = var.tg_if2_ip
- private_ips = [var.tg_if2_ip]
- security_groups = [aws_security_group.CSITSG.id]
- depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg]
+ subnet_id = aws_subnet.d.id
+ source_dest_check = false
+ private_ip = var.tg_if2_ip
+ private_ips = [var.tg_if2_ip]
+ security_groups = [aws_security_group.CSITSG.id]
+ depends_on = [aws_vpc.CSITVPC, aws_subnet.d, aws_instance.tg]
attachment {
- instance = aws_instance.tg.id
- device_index = 2
+ instance = aws_instance.tg.id
+ device_index = 2
}
- tags = {
- "Environment" = var.environment_name
+ tags = {
+ "Environment" = var.environment_name
}
}
data "aws_network_interface" "dut1_if1" {
- id = aws_network_interface.dut1_if1.id
+ id = aws_network_interface.dut1_if1.id
}
data "aws_network_interface" "dut1_if2" {
- id = aws_network_interface.dut1_if2.id
+ id = aws_network_interface.dut1_if2.id
}
data "aws_network_interface" "tg_if1" {
- id = aws_network_interface.tg_if1.id
+ id = aws_network_interface.tg_if1.id
}
data "aws_network_interface" "tg_if2" {
- id = aws_network_interface.tg_if2.id
+ id = aws_network_interface.tg_if2.id
}
# Instances
@@ -238,8 +238,8 @@ resource "aws_instance" "tg" {
}
tags = {
- "Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg"
- "Environment" = var.environment_name
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-tg"
+ "Environment" = var.environment_name
}
}
@@ -263,8 +263,8 @@ resource "aws_instance" "dut1" {
}
tags = {
- "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1"
- "Environment" = var.environment_name
+ "Name" = "${var.resources_name_prefix}_${var.testbed_name}-dut1"
+ "Environment" = var.environment_name
}
}
@@ -292,7 +292,7 @@ resource "aws_route" "dummy-trex-port-1" {
# Deployment/Ansible
resource "null_resource" "deploy_tg" {
- depends_on = [
+ depends_on = [
aws_instance.tg,
aws_network_interface.tg_if1,
aws_network_interface.tg_if2,
@@ -302,23 +302,23 @@ resource "null_resource" "deploy_tg" {
]
connection {
- user = "ubuntu"
- host = aws_instance.tg.public_ip
- private_key = tls_private_key.CSITTLS.private_key_pem
+ user = "ubuntu"
+ host = aws_instance.tg.public_ip
+ private_key = tls_private_key.CSITTLS.private_key_pem
}
provisioner "remote-exec" {
- inline = var.first_run_commands
+ inline = var.first_run_commands
}
provisioner "ansible" {
plays {
playbook {
- file_path = var.ansible_file_path
- force_handlers = true
+ file_path = var.ansible_file_path
+ force_handlers = true
}
- hosts = ["tg_aws"]
- extra_vars = {
+ hosts = ["tg_aws"]
+ extra_vars = {
ansible_ssh_pass = var.ansible_provision_pwd
ansible_python_interpreter = var.ansible_python_executable
aws = true
@@ -327,13 +327,13 @@ resource "null_resource" "deploy_tg" {
}
provisioner "remote-exec" {
- on_failure = continue
- inline = ["sudo reboot"]
+ on_failure = continue
+ inline = ["sudo reboot"]
}
}
resource "null_resource" "deploy_dut1" {
- depends_on = [
+ depends_on = [
aws_instance.tg,
aws_network_interface.tg_if1,
aws_network_interface.tg_if2,
@@ -343,23 +343,23 @@ resource "null_resource" "deploy_dut1" {
]
connection {
- user = "ubuntu"
- host = aws_instance.dut1.public_ip
- private_key = tls_private_key.CSITTLS.private_key_pem
+ user = "ubuntu"
+ host = aws_instance.dut1.public_ip
+ private_key = tls_private_key.CSITTLS.private_key_pem
}
provisioner "remote-exec" {
- inline = var.first_run_commands
+ inline = var.first_run_commands
}
provisioner "ansible" {
plays {
playbook {
- file_path = var.ansible_file_path
- force_handlers = true
+ file_path = var.ansible_file_path
+ force_handlers = true
}
- hosts = ["sut_aws"]
- extra_vars = {
+ hosts = ["sut_aws"]
+ extra_vars = {
ansible_ssh_pass = var.ansible_provision_pwd
ansible_python_interpreter = var.ansible_python_executable
aws = true
@@ -368,21 +368,21 @@ resource "null_resource" "deploy_dut1" {
}
provisioner "remote-exec" {
- on_failure = continue
- inline = ["sudo reboot"]
+ on_failure = continue
+ inline = ["sudo reboot"]
}
}
resource "null_resource" "deploy_topology" {
- depends_on = [ aws_instance.tg, aws_instance.dut1 ]
+ depends_on = [ aws_instance.tg, aws_instance.dut1 ]
provisioner "ansible" {
plays {
playbook {
file_path = var.ansible_topology_path
}
- hosts = ["local"]
- extra_vars = {
+ hosts = ["local"]
+ extra_vars = {
ansible_python_interpreter = var.ansible_python_executable
testbed_name = var.testbed_name
cloud_topology = var.topology_name