author    | Peter Mikus <pmikus@cisco.com> | 2019-02-01 14:51:06 +0000
committer | Peter Mikus <pmikus@cisco.com> | 2019-02-04 17:40:00 +0000
commit    | 22ff475dae7f9f09e8b3b7c899731803752761c0 (patch)
tree      | 1d72a5cce150f8bf9d3bc518fc50396c5f305099
parent    | 446b1f79bc4190dc3bc140d6698d7cef9c5be283 (diff)
CSIT-1416 Remove installation of vpp from containers
Use the parent system (host or container) installation of VPP. This saves
internet bandwidth by skipping installation of prerequisite packages.
It also skips the dpkg install and simplifies the process of initializing
VPP inside a container.
Previously, initialization of VPP in a container took about 55 s. With this
patch it is reduced to 2-3 s.
This patch also removes the bloated VOLUME creation between container
sidecars (a.k.a. nested containers) and fixes the hugepage allocation.
Change-Id: Ifa2be532edb77354657e1b84568bdc34993b00d0
Signed-off-by: Peter Mikus <pmikus@cisco.com>
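In plain Docker terms, the approach described above amounts to resolving the parent (DUT1) container's overlay directory and bind-mounting its VPP binaries and libraries into the nested container, instead of installing VPP there. A minimal, hypothetical sketch (container name, image and the `sleep infinity` command are illustrative, not part of this patch; it assumes the overlay2 storage driver):

```bash
# Resolve the parent container's merged overlay directory on the host.
PARENT_UUID="${CSIT_DUT1_UUID}"
ROOT="$(sudo docker inspect \
    --format='{{.GraphDriver.Data.MergedDir}}' "${PARENT_UUID}")"

# Run a sibling "nested" container that reuses the parent's VPP installation
# via bind mounts instead of installing VPP with dpkg inside it.
sudo docker run --detach --privileged --name csit-nested-example \
    --volume "${ROOT}/usr/bin/vpp:/usr/bin/vpp" \
    --volume "${ROOT}/usr/bin/vppctl:/usr/bin/vppctl" \
    --volume "${ROOT}/usr/lib/x86_64-linux-gnu/:/usr/lib/x86_64-linux-gnu/" \
    --volume "${ROOT}/usr/share/vpp/:/usr/share/vpp/" \
    ubuntu:18.04 sleep infinity
```

This is why initialization drops from roughly 55 s to a few seconds: the only remaining in-container work is starting the already-present VPP binary.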
-rw-r--r-- | resources/libraries/bash/function/device.sh                   | 29
-rw-r--r-- | resources/libraries/python/ContainerUtils.py                  | 50
-rw-r--r-- | resources/libraries/python/DUTSetup.py                        | 22
-rw-r--r-- | resources/libraries/robot/performance/performance_setup.robot |  2
-rw-r--r-- | resources/libraries/robot/shared/container.robot              | 53
-rw-r--r-- | resources/libraries/robot/shared/default.robot                | 18
6 files changed, 80 insertions, 94 deletions
diff --git a/resources/libraries/bash/function/device.sh b/resources/libraries/bash/function/device.sh
index 8aeb221fd9..7d175a9ac3 100644
--- a/resources/libraries/bash/function/device.sh
+++ b/resources/libraries/bash/function/device.sh
@@ -124,18 +124,12 @@ function clean_environment () {
    # Kill docker containers.
    docker rm --force "${DCR_UUIDS[@]}" || die "Cleanup containers failed!"

-    # Check if some container is using volume and remove all the hanged
-    # containers before removing volume. Command will not fail in case there
-    # are no containers to remove.
-    docker rm --force $(docker ps -q --filter volume=${DCR_VOLUMES[dut1]}) || {
+    # Check if there are some leftover containers and remove all. Command will
+    # not fail in case there are no containers to remove.
+    docker rm --force $(docker ps -q --filter name=${DCR_UUIDS[dut1]}) || {
        warn "Failed to remove hanged containers or nothing to remove!"
    }

-    # Remove DUT1 volume.
-    docker volume rm --force "${DCR_VOLUMES[dut1]}" || {
-        die "Failed to remove DUT1 volume!"
-    }
-
    # Rebind interfaces back to kernel drivers.
    for ADDR in ${TG_PCIDEVS[@]}; do
        DRIVER="${TG_DRIVERS[0]}"
@@ -432,10 +426,8 @@ function read_env_variables () {
        export "${param}"
    done
    declare -gA DCR_UUIDS
-    declare -gA DCR_VOLUMES
    DCR_UUIDS+=([tg]="${CSIT_TG_UUID}")
    DCR_UUIDS+=([dut1]="${CSIT_DUT1_UUID}")
-    DCR_VOLUMES+=([dut1]="${CSIT_DUT1_VOL}")
    TG_PCIDEVS=("${CSIT_TG_INTERFACES_PORT1_PCI}")
    TG_DRIVERS=("${CSIT_TG_INTERFACES_PORT1_DRV}")
    TG_PCIDEVS+=("${CSIT_TG_INTERFACES_PORT2_PCI}")
@@ -479,7 +471,6 @@ function set_env_variables () {
    CSIT_DUT1_ARCH="$(uname -i)" || {
        die "Reading machine architecture failed!"
    }
-    CSIT_DUT1_VOL="${DCR_VOLUMES[dut1]}"
    CSIT_TG_INTERFACES_PORT1_MAC="${TG_NETMACS[0]}"
    CSIT_TG_INTERFACES_PORT1_PCI="${TG_PCIDEVS[0]}"
    CSIT_TG_INTERFACES_PORT1_DRV="${TG_DRIVERS[0]}"
@@ -543,25 +534,13 @@ function start_topology_containers () {
    declare -gA DCR_PORTS
    # Docker Container PIDs (namespaces).
    declare -gA DCR_CPIDS
-    # Docker Container volumes with no relationship to the host.
-    declare -gA DCR_VOLUMES
-
-    # Create DUT1 /tmp volume to be able to install VPP in "nested" container.
-    params=(--name DUT1_VOL_$(uuidgen))
-    DCR_VOLUMES+=([dut1]="$(docker volume create "${params[@]}")") || {
-        die "Failed to create DUT1 /tmp volume!"
-    }
-
-    # Mount DUT1_VOL as /tmp directory on DUT1 container
-    dcr_stc_params_dut1="--volume ${DCR_VOLUMES[dut1]}:/tmp "

    # Run TG and DUT1. As initial version we do support only 2-node.
    params=(${dcr_stc_params} --name csit-tg-$(uuidgen) ${dcr_image})
    DCR_UUIDS+=([tg]="$(docker run "${params[@]}")") || {
        die "Failed to start TG docker container!"
    }
-    params=(${dcr_stc_params} ${dcr_stc_params_dut1}
-        --name csit-dut1-$(uuidgen) ${dcr_image})
+    params=(${dcr_stc_params} --name csit-dut1-$(uuidgen) ${dcr_image})
    DCR_UUIDS+=([dut1]="$(docker run "${params[@]}")") || {
        die "Failed to start DUT1 docker container!"
    }
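The reworked cleanup in `clean_environment ()` can be exercised on its own roughly as follows (a sketch; the container name is a placeholder and the CSIT `warn` helper is replaced by a plain `echo`):

```bash
#!/usr/bin/env bash

declare -gA DCR_UUIDS
DCR_UUIDS+=([dut1]="csit-dut1-example")  # placeholder container name/ID

# Remove any leftover containers matching the DUT1 name. The command is
# allowed to "fail" when there is nothing to remove.
docker rm --force $(docker ps -q --filter name=${DCR_UUIDS[dut1]}) || {
    echo "Failed to remove hanged containers or nothing to remove!"
}
```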
diff --git a/resources/libraries/python/ContainerUtils.py b/resources/libraries/python/ContainerUtils.py
index fbfb890dfe..9c42a3c99c 100644
--- a/resources/libraries/python/ContainerUtils.py
+++ b/resources/libraries/python/ContainerUtils.py
@@ -135,19 +135,17 @@ class ContainerManager(object):
            self.engine.container = self.containers[container]
            self.engine.execute(command)

-    def install_vpp_in_all_containers(self):
-        """Install VPP into all containers."""
+    def start_vpp_in_all_containers(self):
+        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
-            self.engine.execute('sleep 3; apt-get update')
            self.engine.install_supervisor()
-            self.engine.install_vpp()
-            self.engine.restart_vpp()
+            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
-        """Restart VPP on all containers."""
+        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()
@@ -395,7 +393,9 @@ class ContainerEngine(object):

    def install_supervisor(self):
        """Install supervisord inside a container."""
-        self.execute('apt-get install -y supervisor')
+        if isinstance(self, LXC):
+            self.execute('sleep 3; apt-get update')
+            self.execute('apt-get install -y supervisor')
        self.execute('echo "{config}" > {config_file} && '
                     'supervisord -c {config_file}'.
                     format(
@@ -415,36 +415,19 @@
                         'nodaemon=false\n\n',
                         config_file=SUPERVISOR_CONF))

-    def install_vpp(self):
-        """Install VPP inside a container."""
-        self.execute('ln -s /dev/null /etc/sysctl.d/80-vpp.conf')
-        # Workaround for install xenial vpp build on bionic ubuntu.
-        self.execute('apt-get install -y wget')
-        self.execute('deb=$(mktemp) && wget -O "${deb}" '
-                     'http://launchpadlibrarian.net/336117627/'
-                     'libmbedcrypto0_2.5.1-1ubuntu1_amd64.deb && '
-                     'dpkg -i "${deb}" && '
-                     'rm -f "${deb}"')
-        self.execute('deb=$(mktemp) && wget -O "${deb}" '
-                     'http://launchpadlibrarian.net/252876048/'
-                     'libboost-system1.58.0_1.58.0+dfsg-5ubuntu3_amd64.deb && '
-                     'dpkg -i "${deb}" && '
-                     'rm -f "${deb}"')
-        self.execute(
-            'dpkg -i --force-all '
-            '{guest_dir}/openvpp-testing/download_dir/*.deb'.
-            format(guest_dir=self.container.mnt[0].split(':')[1]))
-        self.execute('apt-get -f install -y')
-        self.execute('apt-get install -y ca-certificates')
+    def start_vpp(self):
+        """Start VPP inside a container."""
        self.execute('echo "{config}" >> {config_file}'.
                     format(
                         config='[program:vpp]\n'
                         'command=/usr/bin/vpp -c /etc/vpp/startup.conf\n'
+                        'autostart=false\n'
                         'autorestart=false\n'
                         'redirect_stderr=true\n'
                         'priority=1',
                         config_file=SUPERVISOR_CONF))
        self.execute('supervisorctl reload')
+        self.execute('supervisorctl start vpp')

    def restart_vpp(self):
        """Restart VPP service inside a container."""
@@ -608,7 +591,7 @@ class LXC(ContainerEngine):
            return

        image = self.container.image if self.container.image else\
-            "-d ubuntu -r xenial -a amd64"
+            "-d ubuntu -r bionic -a amd64"

        cmd = 'lxc-create -t download --name {c.name} -- {image} '\
            '--no-validate'.format(c=self.container, image=image)
@@ -627,11 +610,14 @@ class LXC(ContainerEngine):
        if self.container.mnt:
            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(':')
+                options = 'bind,create=dir' \
+                    if guest_dir.endswith('/') else 'bind,create=file'
                entry = 'lxc.mount.entry = {host_dir} '\
                    '/var/lib/lxc/{c.name}/rootfs{guest_dir} none ' \
-                    'bind,create=dir 0 0'.format(c=self.container,
-                                                 host_dir=host_dir,
-                                                 guest_dir=guest_dir)
+                    '{options} 0 0'.format(c=self.container,
+                                           host_dir=host_dir,
+                                           guest_dir=guest_dir,
+                                           options=options)
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    "sh -c 'echo \"{e}\" >> /var/lib/lxc/{c.name}/config'".
                    format(e=entry, c=self.container))
diff --git a/resources/libraries/python/DUTSetup.py b/resources/libraries/python/DUTSetup.py
index 4fc0e6fc9c..fd71a82440 100644
--- a/resources/libraries/python/DUTSetup.py
+++ b/resources/libraries/python/DUTSetup.py
@@ -614,6 +614,8 @@ class DUTSetup(object):
        ssh = SSH()
        ssh.connect(node)

+        cmd = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
+        ssh.exec_command_sudo(cmd, timeout=90)
        cmd = "[[ -f /etc/redhat-release ]]"
        return_code, _, _ = ssh.exec_command(cmd)
@@ -683,6 +685,26 @@ class DUTSetup(object):
        return True

    @staticmethod
+    def get_docker_mergeddir(node, uuid):
+        """Get Docker overlay for MergedDir diff.
+
+        :param node: DUT node.
+        :param uuid: Docker UUID.
+        :type node: dict
+        :type uuid: str
+        :returns: Docker container MergedDir.
+        :rtype: str
+        :raises RuntimeError: If getting output failed.
+        """
+        command = "docker inspect --format='"\
+            "{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
+        message = 'Failed to get directory of {uuid} on host {host}'.\
+            format(uuid=uuid, host=node['host'])
+
+        stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
+        return stdout.strip()
+
+    @staticmethod
    def get_huge_page_size(node):
        """Get default size of huge pages in system.
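Outside of Python's `str.format()` escaping, the `docker inspect` call added in `get_docker_mergeddir()` reduces to a one-liner; a shell sketch with a placeholder container ID (valid for the overlay2 storage driver):

```bash
UUID="csit-dut1-example"  # placeholder for the DUT1 Docker container UUID

# Host path of the container's merged overlay root filesystem.
MERGED_DIR="$(sudo docker inspect \
    --format='{{.GraphDriver.Data.MergedDir}}' "${UUID}")"
echo "${MERGED_DIR}"
```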
diff --git a/resources/libraries/robot/performance/performance_setup.robot b/resources/libraries/robot/performance/performance_setup.robot
index a70742f448..ed6f4744db 100644
--- a/resources/libraries/robot/performance/performance_setup.robot
+++ b/resources/libraries/robot/performance/performance_setup.robot
@@ -660,7 +660,7 @@
| | Create all '${container_group}' containers
| | Configure VPP in all '${container_group}' containers
| | Stop VPP service on all DUTs | ${nodes}
-| | Install VPP in all '${container_group}' containers
+| | Start VPP in all '${container_group}' containers
| | Start VPP service on all DUTs | ${nodes}
| | Append To List | ${container_groups} | ${container_group}
diff --git a/resources/libraries/robot/shared/container.robot b/resources/libraries/robot/shared/container.robot
index c08dc71fa6..d1ec6d2a03 100644
--- a/resources/libraries/robot/shared/container.robot
+++ b/resources/libraries/robot/shared/container.robot
@@ -14,9 +14,6 @@
*** Settings ***
| Documentation | Keywords related to linux containers
| ...
-| Library | Collections
-| Library | String
-| ...
| Library | resources.libraries.python.CpuUtils
| Library | resources.libraries.python.topology.Topology
@@ -34,34 +31,38 @@
| | ... | - auto_scale - If True, use same amount of Dataplane threads for
| | ... | network function as DUT, otherwise use single physical core for
| | ... | every network function. Type: boolean
-| | ... | - set_nf_cpus: Set False if CPUs allocatation for network function per
-| | ... | SUT/DUT not required. Type: boolean, default value: ${True}
+| | ... | - nested: Set True if starting nested containers.
+| | ... | Type: boolean, default value: ${False}
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Construct container on all DUTs \| 1 \| 1 \| 1 \| 1 \| ${True} \|
| | ...
| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${nf_chain}=${1}
-| | ... | ${nf_node}=${1} | ${auto_scale}=${True} | ${set_nf_cpus}=${True}
+| | ... | ${nf_node}=${1} | ${auto_scale}=${True} | ${nested}=${False}
| | ...
| | ${duts}= | Get Matches | ${nodes} | DUT*
| | :FOR | ${dut} | IN | @{duts}
| | | ${nf_id}= | Evaluate | (${nf_chain} - ${1}) * ${nf_nodes} + ${nf_node}
| | | ${env}= | Create List | DEBIAN_FRONTEND=noninteractive
-| | | ${tmp}= | Get Variable Value | ${tmp_volume} | /tmp
-| | | ${mnt}= | Create List | ${tmp}:/mnt/host | /dev/vfio:/dev/vfio
-| | | ${nf_cpus}= | Run Keyword If | ${set_nf_cpus}
+| | | ${uuid}= | Get Variable Value | ${dcr_uuid} | ${Empty}
+| | | ${root}= | Get Variable Value | ${dcr_root} | ${Empty}
+| | | ${mnt}= | Create List
+| | | ... | ${root}/tmp/:/mnt/host/
+| | | ... | ${root}/dev/vfio/:/dev/vfio/
+| | | ... | ${root}/usr/bin/vpp:/usr/bin/vpp
+| | | ... | ${root}/usr/bin/vppctl:/usr/bin/vppctl
+| | | ... | ${root}/usr/lib/x86_64-linux-gnu/:/usr/lib/x86_64-linux-gnu/
+| | | ... | ${root}/usr/share/vpp/:/usr/share/vpp/
+| | | ${nf_cpus}= | Set Variable | ${None}
+| | | ${nf_cpus}= | Run Keyword Unless | ${nested}
| | | ... | Create network function CPU list | ${dut}
| | | ... | chains=${nf_chains} | nodeness=${nf_nodes} | chain_id=${nf_chain}
| | | ... | node_id=${nf_node} | auto_scale=${auto_scale}
-| | | ... | ELSE | Set Variable | ${None}
-| | | ${uuid_str}= | Run Keyword If | '${tmp}' == '/tmp'
-| | | ... | Set Variable | ${EMPTY}
-| | | ... | ELSE | Remove String | ${tmp} | ${dut}_VOL
| | | &{cont_args}= | Create Dictionary
-| | | ... | name=${dut}_${container_group}${nf_id}${uuid_str}
+| | | ... | name=${dut}_${container_group}${nf_id}${uuid}
| | | ... | node=${nodes['${dut}']} | mnt=${mnt} | env=${env}
-| | | Run Keyword If | ${set_nf_cpus}
+| | | Run Keyword Unless | ${nested}
| | | ... | Set To Dictionary | ${cont_args} | cpuset_cpus=${nf_cpus}
| | | Run Keyword | ${container_group}.Construct container | &{cont_args}
@@ -75,8 +76,8 @@
| | ... | - auto_scale - If True, use same amount of Dataplane threads for
| | ... | network function as DUT, otherwise use single physical core for
| | ... | every network function. Type: boolean
-| | ... | - set_nf_cpus: Set False if CPUs allocatation for network function per
-| | ... | SUT/DUT not required. Type: boolean, default value: ${True}
+| | ... | - nested: Set True if starting nested containers.
+| | ... | Type: boolean, default value: ${False}
| | ...
| | ... | *Example:*
| | ...
@@ -84,12 +85,12 @@
| | ... | \| ${True} \|
| | ...
| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${nf_chain}=${1}
-| | ... | ${auto_scale}=${True} | ${set_nf_cpus}=${True}
+| | ... | ${auto_scale}=${True} | ${nested}=${False}
| | ...
| | :FOR | ${nf_node} | IN RANGE | 1 | ${nf_nodes}+1
| | | Construct container on all DUTs | nf_chains=${nf_chains}
| | | ... | nf_nodes=${nf_nodes} | nf_chain=${nf_chain} | nf_node=${nf_node}
-| | | ... | auto_scale=${auto_scale} | set_nf_cpus=${set_nf_cpus}
+| | | ... | auto_scale=${auto_scale} | nested=${nested}

| Construct chains of containers on all DUTs
| | [Documentation] | Construct 1..N chains of 1..N CNFs on all DUT nodes.
@@ -102,20 +103,20 @@
| | ... | - auto_scale - If True, use same amount of Dataplane threads for
| | ... | network function as DUT, otherwise use single physical core for
| | ... | every network function. Type: boolean
-| | ... | - set_nf_cpus: Set False if CPUs allocatation for network function per
-| | ... | SUT/DUT not required. Type: boolean, default value: ${True}
+| | ... | - nested: Set True if starting nested containers.
+| | ... | Type: boolean, default value: ${False}
| | ...
| | ... | *Example:*
| | ...
| | ... | \| Construct chains of containers on all DUTs \| 1 \| 1 \|
| | ...
| | [Arguments] | ${nf_chains}=${1} | ${nf_nodes}=${1} | ${auto_scale}=${True}
-| | ... | ${set_nf_cpus}=${True}
+| | ... | ${nested}=${False}
| | ...
| | :FOR | ${nf_chain} | IN RANGE | 1 | ${nf_chains}+1
| | | Construct chain of containers on all DUTs | nf_chains=${nf_chains}
| | | ... | nf_nodes=${nf_nodes} | nf_chain=${nf_chain}
-| | | ... | auto_scale=${auto_scale} | set_nf_cpus=${set_nf_cpus}
+| | | ... | auto_scale=${auto_scale} | nested=${nested}

| Acquire all '${group}' containers
| | [Documentation] | Acquire all container(s) in specific container group on
@@ -129,11 +130,11 @@
| | ...
| | Run Keyword | ${group}.Create all containers

-| Install VPP in all '${group}' containers
-| | [Documentation] | Install VPP on all container(s) in specific container
+| Start VPP in all '${group}' containers
+| | [Documentation] | Start VPP on all container(s) in specific container
| | ... | group on all DUT nodes.
| | ...
-| | Run Keyword | ${group}.Install VPP In All Containers
+| | Run Keyword | ${group}.Start VPP In All Containers

| Restart VPP in all '${group}' containers
| | [Documentation] | Restart VPP on all container(s) in specific container
diff --git a/resources/libraries/robot/shared/default.robot b/resources/libraries/robot/shared/default.robot
index b60ebc98a7..03843c64c6 100644
--- a/resources/libraries/robot/shared/default.robot
+++ b/resources/libraries/robot/shared/default.robot
@@ -550,9 +550,8 @@
| | ... | default value: ${1}
| | ...
| | ... | _NOTE:_ This KW sets following test case variables:
-| | ... | - tmp_volume - Docker volume mounted as /tmp directory on DUT1.
-| | ... | - dcr_uuid - UUID string (including prefix - underscore character) of
-| | ... | DUT1 /tmp volume.
+| | ... | - dcr_uuid - Parent container UUID.
+| | ... | - dcr_root - Parent container overlay.
| | ...
| | ... | *Example:*
| | ...
@@ -565,19 +564,18 @@
| | Import Library | resources.libraries.python.ContainerUtils.ContainerManager
| | ... | engine=${container_engine} | WITH NAME | ${container_group}
| | ...
-| | ${tmp_volume}= | Get Environment Variable | CSIT_DUT1_VOL
-| | ${dcr_uuid}= | Remove String | ${tmp_volume} | DUT1_VOL
-| | Set Test Variable | ${tmp_volume}
+| | ${dcr_uuid}= | Get Environment Variable | CSIT_DUT1_UUID
+| | ${dcr_root}= | Run Keyword | Get Docker Mergeddir | ${nodes['DUT1']}
+| | ... | ${dcr_uuid}
| | Set Test Variable | ${dcr_uuid}
+| | Set Test Variable | ${dcr_root}
| | ...
| | Construct chains of containers on all DUTs | ${chains} | ${nodeness}
-| | ... | set_nf_cpus=${False}
+| | ... | nested=${True}
| | Acquire all '${container_group}' containers
| | Create all '${container_group}' containers
| | Configure VPP in all '${container_group}' containers
-| | Stop VPP service on all DUTs | ${nodes}
-| | Install VPP in all '${container_group}' containers
-| | Start VPP service on all DUTs | ${nodes}
+| | Start VPP in all '${container_group}' containers
| | Append To List | ${container_groups} | ${container_group}

| Tear down TAP functional test
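For reference, the supervisord side of `start_vpp()` boils down to the following shell sequence inside the (nested) container (a sketch; the config file path stands in for `SUPERVISOR_CONF`, whose actual value is defined elsewhere in CSIT):

```bash
SUPERVISOR_CONF="/etc/supervisord.conf"  # assumed value of SUPERVISOR_CONF

# Register VPP with supervisord without autostarting it, then start it
# explicitly; no packages are installed inside the container anymore.
cat >> "${SUPERVISOR_CONF}" <<'EOF'
[program:vpp]
command=/usr/bin/vpp -c /etc/vpp/startup.conf
autostart=false
autorestart=false
redirect_stderr=true
priority=1
EOF

supervisorctl reload
supervisorctl start vpp
```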