From 12d21f032e9a67b327101c42481a546a48a6ac21 Mon Sep 17 00:00:00 2001
From: Mauro Sardara
Date: Tue, 11 Oct 2022 20:01:32 +0200
Subject: test: instrument functional tests with more logs

Also:
- Do not run tests with privileged containers HICN-806
- Refactor robot test files and config.sh

Currently some logs are missing from the robot output of the functional
tests. This patch adds them.

Ticket: HICN-805

Signed-off-by: Mauro Sardara
Change-Id: I9893eb356a4dd12f4bc6347b8fe02e358c9bf737
---
 scripts/functions.sh                            |  16 +-
 tests/.env                                      |  20 ++
 tests/1-node.yml                                |  16 +-
 tests/2-nodes-hicn-light.yml                    |  29 +-
 tests/2-nodes-vpp-bridge.yml                    |  37 +-
 tests/2-nodes-vpp-memif-replication.yml         |   4 +-
 tests/2-nodes-vpp-memif.yml                     |   4 +-
 tests/2-nodes.yml                               |  22 +-
 tests/config.sh                                 | 105 +++---
 tests/functional-tests/2-nodes-hicn-light.robot |  92 +++--
 tests/functional-tests/2-nodes-vpp-bridge.robot |  57 ++-
 .../2-nodes-vpp-memif-replication.robot         |  49 ++-
 tests/functional-tests/2-nodes-vpp-memif.robot  |  49 ++-
 tests/functional-tests/hicn-light-control.robot |  61 ++--
 tests/functional-tests/hicn-light-ping.robot    |  40 ++-
 tests/resources/libraries/robot/common.robot    |  46 ++-
 tests/resources/libraries/robot/runtest.robot   | 381 +++++++++++++++------
 tests/run-functional.sh                         |   2 +-
 18 files changed, 708 insertions(+), 322 deletions(-)

diff --git a/scripts/functions.sh b/scripts/functions.sh
index 544c283fc..9ff252416 100644
--- a/scripts/functions.sh
+++ b/scripts/functions.sh
@@ -114,6 +114,10 @@ function download_artifacts() {
   return 1
 }
 
+function is_selinuxenabled() {
+  sudo selinuxenabled && return 1 || return 0
+}
+
 # Run functional tests
 function functional_test() {
   echo "*******************************************************************"
@@ -129,8 +133,16 @@ function functional_test() {
   fi
 
   # Run functional tests
-  pushd ${SCRIPT_PATH}/../tests
-  BUILD_SOFTWARE=${build_sw} DOCKERFILE=${dockerfile_path} bash ./run-functional.sh
+  pushd "${SCRIPT_PATH}/../tests"
+  # If SELinux is enabled, run the tests in a privileged container to bypass
+  # the checks, which also have a performance cost
+  if is_selinuxenabled; then
+    local privileged=false
+  else
+    local privileged=true
+  fi
+
+  BUILD_SOFTWARE=${build_sw} DOCKERFILE=${dockerfile_path} TEST_PRIVILEGED=${privileged} bash ./run-functional.sh
   popd
 
   echo "*******************************************************************"
diff --git a/tests/.env b/tests/.env
index bda915201..28b9570e7 100644
--- a/tests/.env
+++ b/tests/.env
@@ -8,8 +8,28 @@ TEST_VPP_BRIDGE=vpp-bridge
 TEST_VPP_MEMIF=vpp-memif
 TEST_VPP_MEMIF_REPLICATION=vpp-memif-replication
 
+# Container privileged
+PRIVILEGED=false
+
 # names
 RTC_PRODUCER=b002:0:0:0:abcd::/80
 RAAQM_PRODUCER=b002::2
 PING_PRODUCER=b002::3
 RAAQM_PRODUCER_NEW=b002::4
+
+# Log
+FORWARDER_LOG_PATH=/tmp/forwarder.log
+
+# IP addresses
+TOPOLOGY_2_NODES_IP_NETWORK=192.168.1.0/24
+TOPOLOGY_2_NODES_IP_ADDRESS_CLIENT=192.168.1.2
+TOPOLOGY_2_NODES_IP_ADDRESS_SERVER=192.168.1.3
+TOPOLOGY_2_NODES_IP_GATEWAY=192.168.1.254
+
+TOPOLOGY_2_NODES_IP6_NETWORK=2001::/64
+TOPOLOGY_2_NODES_IP6_ADDRESS_CLIENT=2001::1
+TOPOLOGY_2_NODES_IP6_ADDRESS_SERVER=2001::2
+
+TOPOLOGY_1_NODE_IP_NETWORK=192.168.2.0/24
+TOPOLOGY_1_NODE_IP_ADDRESS=192.168.2.2
+TOPOLOGY_1_NODE_IP_GATEWAY=192.168.2.254
diff --git a/tests/1-node.yml b/tests/1-node.yml
index 859d1b838..9e496d7b8 100644
--- a/tests/1-node.yml
+++ b/tests/1-node.yml
@@ -7,11 +7,13 @@ services:
       args:
         - BASE_IMAGE
     image: hicn-base
-    privileged: true
     container_name: forwarder
     working_dir: /workspace
     volumes:
       - ..:/workspace:z
+ networks: + the-network: + ipv4_address: ${TOPOLOGY_1_NODE_IP_ADDRESS} entrypoint: [/bin/bash, -ex, -c] command: - | @@ -19,8 +21,12 @@ services: sudo ninja -C /workspace/build-dev install fi - sudo ip addr add 192.168.1.1/24 dev eth0 - sudo hicn-light-daemon \ - --daemon --log-file /tmp/lite_client.log + sudo hicn-light-daemon --log-file /tmp/lite_client.log - tail -f /dev/null +networks: + the-network: + driver: bridge + ipam: + config: + - subnet: ${TOPOLOGY_1_NODE_IP_NETWORK} + gateway: ${TOPOLOGY_1_NODE_IP_GATEWAY} diff --git a/tests/2-nodes-hicn-light.yml b/tests/2-nodes-hicn-light.yml index 5e97c1069..50b756f26 100644 --- a/tests/2-nodes-hicn-light.yml +++ b/tests/2-nodes-hicn-light.yml @@ -8,23 +8,17 @@ services: sudo ninja -C /workspace/build-dev install fi - sudo ip link add br0 type bridge - sudo ip addr add 192.168.1.1/24 dev br0 - sudo ip link set eth0 master br0 - sudo ip link set eth0 up - sudo ip link set br0 up - sudo ip route add 192.168.1.2 via 192.168.1.1 dev br0 tee -a /tmp/hicn-light.conf <&2 "${@}" return 1 @@ -204,11 +211,15 @@ function setup() { docker-compose -f "${topology}".yml -f "${topology}-${conf}".yml up --remove-orphans --force-recreate -d sleep 10 + + # Check logs + docker-compose -f "${topology}".yml -f "${topology}-${conf}".yml logs } function start() { - conf=${1} - test=${2} + topology=${1} + conf=${2} + test=${3} if ! conf_exists "${conf}"; then error "Error: configuration does not exist." @@ -220,7 +231,18 @@ function start() { error "Error: test does not exist." fi - docker exec "${1}"-client bash -c "/workspace/tests/config.sh runtest ${tests[${TESTNAME}]}" + DOCKER_COMMAND="docker-compose -f ${topology}.yml -f ${topology}-${conf}.yml exec -T" + + ${DOCKER_COMMAND} client bash -x /workspace/tests/config.sh runtest "${tests[${TESTNAME}]}" + + # Print also forwader log + echo "Forwarder Log - CLIENT" + ${DOCKER_COMMAND} client cat "${FORWARDER_LOG_PATH}" + + echo + + echo "Forwarder Log - SERVER" + ${DOCKER_COMMAND} server cat "${FORWARDER_LOG_PATH}" } function stop() { @@ -261,7 +283,7 @@ function runtest() { # Test commands (hicn-light-control) ################################################################ INTERFACE="eth0" -ADDRESS="192.168.1.1" +ADDRESS="${TOPOLOGY_1_NODE_IP_ADDRESS}" LISTENER_NAME="udp0" LISTENER_NAME_2="udp1" CONN_NAME="conn0" @@ -272,10 +294,13 @@ COST=1 #--------------------------------------------------------------- # Helpers #--------------------------------------------------------------- + +DOCKER_COMMAND="docker-compose -f 1-node.yml exec -T client" + function exec_command() { command=$1 - output=$(docker exec forwarder hicn-light-control $command 2>&1) + output=$(${DOCKER_COMMAND} hicn-light-control "$command" 2>&1) echo "$output" } @@ -317,7 +342,7 @@ function test_listeners() { command="list listener" output=$(exec_command "${command}") - if [[ "${output}" =~ "udp0 inet4://192.168.1.1:9695" && + if [[ "${output}" =~ "udp0 inet4://${ADDRESS}:9695" && "${output}" =~ "udp1 inet4://127.0.0.1:12345" && "${output}" =~ "interface=lo" && "${output}" =~ "interface=$INTERFACE" && @@ -371,8 +396,8 @@ function test_connections() { command="list connection" output=$(exec_command "${command}") - if [[ "${output}" =~ "inet4://192.168.1.1:12345" && - "${output}" =~ "inet4://192.168.1.1:9695" && + if [[ "${output}" =~ "inet4://${ADDRESS}:12345" && + "${output}" =~ "inet4://${ADDRESS}:9695" && "${output}" =~ "conn0" && "${output}" =~ "conn1" && ! 
"${output}" =~ "ERROR" ]]; then echo "OK" @@ -475,27 +500,27 @@ function ctrl() { # Test ping ################################################################ function test_ping_manifest() { - docker exec forwarder bash -c 'hicn-ping-server -a intmanifest >/tmp/ping_server.log 2>&1 &' + ${DOCKER_COMMAND} bash -c 'hicn-ping-server -a intmanifest >/tmp/ping_server.log 2>&1 &' sleep 1 # 2 interests w/ 3 suffixes each (1 in header + 2 in manifest) - docker exec forwarder bash -c 'hicn-ping-client -m 6 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 6 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' sleep 1 # 2 interests w/ 3 suffixes each + 1 single interest - docker exec forwarder bash -c 'hicn-ping-client -m 7 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 7 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' sleep 1 # 2 interests w/ 3 suffixes each + 1 interest w/ 2 suffixes - docker exec forwarder bash -c 'hicn-ping-client -m 8 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 8 -a 2 intmanifest 2>&1 | grep "Sent" >>/tmp/ping_client.log' sleep 1 # 2 interests w/ 3 suffixes each + 1 single interest, # using random prefix/suffix generation - docker exec forwarder bash -c 'hicn-ping-client -m 7 -a 2 intmanifest -b RANDOM 2>&1 | grep "Sent" >>/tmp/ping_client.log' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 7 -a 2 intmanifest -b RANDOM 2>&1 | grep "Sent" >>/tmp/ping_client.log' # No 'failed' expected - ping_server_logs=$(docker exec forwarder cat /tmp/ping_server.log) + ping_server_logs=$(${DOCKER_COMMAND} cat /tmp/ping_server.log) if [[ $(echo $ping_server_logs | grep failed | wc -l) -ne 0 ]]; then echo "******** Server logs (ping) ********" echo "$ping_server_logs" @@ -503,7 +528,7 @@ function test_ping_manifest() { fi # No 'Timeouts: 0' expected - ping_client_logs=$(docker exec forwarder cat /tmp/ping_client.log) + ping_client_logs=$(${DOCKER_COMMAND} cat /tmp/ping_client.log) if [[ $(echo $ping_client_logs | grep -v "Timeouts: 0" | wc -l) -ne 0 ]]; then echo "******** Client logs (ping) ********" echo "$ping_client_logs" @@ -512,14 +537,14 @@ function test_ping_manifest() { } function test_ping_wrong_signature() { - docker exec forwarder bash -c 'hicn-ping-server -a intmanifest >/tmp/ping_server.log 2>&1 &' + ${DOCKER_COMMAND} bash -c 'hicn-ping-server -a intmanifest >/tmp/ping_server.log 2>&1 &' sleep 1 # Signature mismatch ('intmamifest' on server vs 'wrong_sign' on client) - docker exec forwarder bash -c 'hicn-ping-client -m 6 -a 2 wrong_sig' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 6 -a 2 wrong_sig' # 'failed' expected - ping_server_logs=$(docker exec forwarder cat /tmp/ping_server.log) + ping_server_logs=$(${DOCKER_COMMAND} cat /tmp/ping_server.log) if [[ $(echo $ping_server_logs | grep "failed" | wc -l) -eq 0 ]]; then echo "******** Server logs (signature fail) ********" echo "$ping_server_logs" @@ -529,10 +554,10 @@ function test_ping_wrong_signature() { function test_ping_no_server() { # Server not started to check for ping client timeout - docker exec forwarder bash -c 'hicn-ping-client -m 6 2>&1 | grep "Sent" >/tmp/ping_client.log' + ${DOCKER_COMMAND} bash -c 'hicn-ping-client -m 6 2>&1 | grep "Sent" >/tmp/ping_client.log' # 'Timeouts: 6' expected - ping_client_logs=$(docker exec forwarder cat /tmp/ping_client.log) + ping_client_logs=$(${DOCKER_COMMAND} cat 
/tmp/ping_client.log) if [[ $(echo $ping_client_logs | grep "Timeouts: 6" | wc -l) -eq 0 ]]; then echo "******** Client logs (timeout) ********" echo "$ping_client_logs" @@ -576,20 +601,20 @@ while (("${#}")); do 'setchannel') shift setchannel "$@" - shift 4 + shift 5 ;; 'changechannel') shift changechannel "$@" - shift 4 + shift 5 ;; 'setup') setup "${2}" "${3}" shift 3 ;; 'start') - start "${2}" "${3}" - shift 3 + start "${2}" "${3}" "${4}" + shift 4 ;; 'stop') stop "${2}" "${3}" diff --git a/tests/functional-tests/2-nodes-hicn-light.robot b/tests/functional-tests/2-nodes-hicn-light.robot index fedcc9797..1686ca9e6 100644 --- a/tests/functional-tests/2-nodes-hicn-light.robot +++ b/tests/functional-tests/2-nodes-hicn-light.robot @@ -1,34 +1,84 @@ *** Settings *** -Resource resources/libraries/robot/runtest.robot -Resource resources/libraries/robot/common.robot -Suite Setup Run Keywords -... Build Topology 2-nodes hicn-light AND -... Check Environment -Suite Teardown Run Keywords -... Destroy Topology -Resource resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/common.robot + +Suite Setup Run Keywords +... Build Topology +... 2-nodes +... hicn-light +... AND +... Check Environment +Suite Teardown Run Keywords +... Destroy Topology -*** Test Cases *** +*** Test Cases *** Throughput Testing Raaqm Mobile - Run Throughput Test Raaqm hicn-light 200 500 400 + Run Throughput Test Raaqm + ... 2-nodes + ... hicn-light + ... 200 + ... 500 + ... 400 -Throughput Testing Raaqm Mobile New - Run Throughput Test Raaqm New hicn-light 200 500 400 +Throughput Testing Raaqm Mobile New Packet Format + Run Throughput Test Raaqm New Packet Format + ... 2-nodes + ... hicn-light + ... 200 + ... 500 + ... 400 Throughput Testing CBR Mobile - Run Throughput Test CBR hicn-light 200 500 400 + Run Throughput Test CBR + ... 2-nodes + ... hicn-light + ... 20 + ... 500 + ... 400 -Throughput Testing CBR Mobile New - Run Throughput Test CBR New hicn-light 200 500 400 +Throughput Testing CBR Mobile New Packet Format + Run Throughput Test CBR New Packet Format + ... 2-nodes + ... hicn-light + ... 200 + ... 500 + ... 400 RTC Testing Mobile - Run RTC Test hicn-light 4 4 4 + Run RTC Test + ... 2-nodes + ... hicn-light + ... 4 + ... 4 + ... 4 Latency Testing Mobile - Set Link hicn-light 500 1 0 0 - Run Latency Test hicn-light 3000 3000 3000 + Set Link + ... 2-nodes + ... hicn-light + ... 500 + ... 1 + ... 0 + ... 0 + Run Latency Test + ... 2-nodes + ... hicn-light + ... 3000 + ... 3000 + ... 3000 -Latency Testing Mobile New - Set Link hicn-light 500 1 0 0 - Run Latency Test New hicn-light 3000 3000 3000 +Latency Testing Mobile New Packet Format + Set Link + ... 2-nodes + ... hicn-light + ... 500 + ... 1 + ... 0 + ... 0 + Run Latency Test New Packet Format + ... 2-nodes + ... hicn-light + ... 3000 + ... 3000 + ... 3000 diff --git a/tests/functional-tests/2-nodes-vpp-bridge.robot b/tests/functional-tests/2-nodes-vpp-bridge.robot index 83c8818ab..fd3d42fe0 100644 --- a/tests/functional-tests/2-nodes-vpp-bridge.robot +++ b/tests/functional-tests/2-nodes-vpp-bridge.robot @@ -1,24 +1,53 @@ *** Settings *** -Resource resources/libraries/robot/runtest.robot -Resource resources/libraries/robot/common.robot -Suite Setup Run Keywords -... Build Topology 2-nodes vpp-bridge AND -... Check Environment -Suite Teardown Run Keywords -... 
Destroy Topology -Resource resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/common.robot + +Suite Setup Run Keywords +... Build Topology +... 2-nodes +... vpp-bridge +... AND +... Check Environment +Suite Teardown Run Keywords +... Destroy Topology -*** Test Cases *** +*** Test Cases *** Throughput Testing Raaqm Server VPP bridge - Run Throughput Test Raaqm vpp-bridge 500 500 500 + Run Throughput Test Raaqm + ... 2-nodes + ... vpp-bridge + ... 500 + ... 500 + ... 500 Throughput Testing CBR Server VPP bridge - Run Throughput Test CBR vpp-bridge 1000 1300 1200 + Run Throughput Test CBR + ... 2-nodes + ... vpp-bridge + ... 1000 + ... 1300 + ... 1200 RTC Testing Server VPP bridge - Run RTC Test vpp-bridge 4 4 4 + Run RTC Test + ... 2-nodes + ... vpp-bridge + ... 4 + ... 4 + ... 4 Latency Testing Server VPP bridge - Set Link hicn-light 500 1 0 0 - Run Latency Test vpp-bridge 3000 3000 3000 + Set Link + ... 2-nodes + ... hicn-light + ... 500 + ... 1 + ... 0 + ... 0 + Run Latency Test + ... 2-nodes + ... vpp-bridge + ... 3000 + ... 3000 + ... 3000 diff --git a/tests/functional-tests/2-nodes-vpp-memif-replication.robot b/tests/functional-tests/2-nodes-vpp-memif-replication.robot index 8c13f4fb9..186eb5c94 100644 --- a/tests/functional-tests/2-nodes-vpp-memif-replication.robot +++ b/tests/functional-tests/2-nodes-vpp-memif-replication.robot @@ -1,23 +1,46 @@ *** Settings *** -Resource resources/libraries/robot/runtest.robot -Resource resources/libraries/robot/common.robot -Suite Setup Run Keywords -... Build Topology 2-nodes vpp-memif-replication AND -... Check Environment -Suite Teardown Run Keywords -... Destroy Topology -Resource resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/common.robot + +Suite Setup Run Keywords +... Build Topology +... 2-nodes +... vpp-memif-replication +... AND +... Check Environment +Suite Teardown Run Keywords +... Destroy Topology -*** Test Cases *** +*** Test Cases *** Throughput Testing Raaqm Server VPP memif replication - Run Throughput Test Raaqm vpp-memif-replication 500 500 500 + Run Throughput Test Raaqm + ... 2-nodes + ... vpp-memif-replication + ... 500 + ... 500 + ... 500 Throughput Testing CBR Server VPP memif - Run Throughput Test CBR vpp-memif-replication 2000 2000 2000 + Run Throughput Test CBR + ... 2-nodes + ... vpp-memif-replication + ... 2000 + ... 2000 + ... 2000 RTC Testing Server VPP memif replication - Run RTC Test vpp-memif-replication 4 4 4 + Run RTC Test + ... 2-nodes + ... vpp-memif-replication + ... 4 + ... 4 + ... 4 Latency Testing Server VPP memif replication - Run Latency Test vpp-memif-replication 3000 3000 3000 + Run Latency Test + ... 2-nodes + ... vpp-memif-replication + ... 3000 + ... 3000 + ... 3000 diff --git a/tests/functional-tests/2-nodes-vpp-memif.robot b/tests/functional-tests/2-nodes-vpp-memif.robot index 1a69da787..ed9ab1143 100644 --- a/tests/functional-tests/2-nodes-vpp-memif.robot +++ b/tests/functional-tests/2-nodes-vpp-memif.robot @@ -1,23 +1,46 @@ *** Settings *** -Resource resources/libraries/robot/runtest.robot -Resource resources/libraries/robot/common.robot -Suite Setup Run Keywords -... Build Topology 2-nodes vpp-memif AND -... Check Environment -Suite Teardown Run Keywords -... 
Destroy Topology -Resource resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/runtest.robot +Resource ../resources/libraries/robot/common.robot + +Suite Setup Run Keywords +... Build Topology +... 2-nodes +... vpp-memif +... AND +... Check Environment +Suite Teardown Run Keywords +... Destroy Topology -*** Test Cases *** +*** Test Cases *** Throughput Testing Raaqm Server VPP memif - Run Throughput Test Raaqm vpp-memif 500 500 500 + Run Throughput Test Raaqm + ... 2-nodes + ... vpp-memif + ... 500 + ... 500 + ... 500 Throughput Testing CBR Server VPP memif - Run Throughput Test CBR vpp-memif 2000 2000 2000 + Run Throughput Test CBR + ... 2-nodes + ... vpp-memif + ... 2000 + ... 2000 + ... 2000 RTC Testing Server VPP memif - Run RTC Test vpp-memif 4 4 4 + Run RTC Test + ... 2-nodes + ... vpp-memif + ... 4 + ... 4 + ... 4 Latency Testing Server VPP memif - Run Latency Test vpp-memif 3000 3000 3000 + Run Latency Test + ... 2-nodes + ... vpp-memif + ... 3000 + ... 3000 + ... 3000 diff --git a/tests/functional-tests/hicn-light-control.robot b/tests/functional-tests/hicn-light-control.robot index e29fc51d6..147226188 100644 --- a/tests/functional-tests/hicn-light-control.robot +++ b/tests/functional-tests/hicn-light-control.robot @@ -1,29 +1,48 @@ *** Settings *** -Resource resources/libraries/robot/common.robot -Test Setup Run Keywords -... Build Topology 1-node AND -... Check Environment -Test Teardown Run Keywords -... Destroy Topology +Resource ../resources/libraries/robot/common.robot + +Test Setup Run Keywords +... Build Topology +... 1-node +... AND +... Check Environment +Test Teardown Run Keywords +... Destroy Topology + *** Test Cases *** Listeners - Log to console Test listeners - ${result} = Run Process ${EXECDIR}/config.sh ctrl listeners - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 - Should Not Contain ${result.stdout} FAILED + Log to console Test listeners + ${result} = Run Process + ... bash + ... -x + ... ${EXECDIR}/config.sh + ... ctrl + ... listeners + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 + Should Not Contain ${result.stdout} FAILED Connections - Log to console Test connections - ${result} = Run Process ${EXECDIR}/config.sh ctrl connections - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 - Should Not Contain ${result.stdout} FAILED + Log to console Test connections + ${result} = Run Process + ... bash + ... -x + ... ${EXECDIR}/config.sh + ... ctrl + ... connections + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 + Should Not Contain ${result.stdout} FAILED Routes - Log to console Test routes - ${result} = Run Process ${EXECDIR}/config.sh ctrl routes - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 - Should Not Contain ${result.stdout} FAILED + Log to console Test routes + ${result} = Run Process + ... bash + ... -x + ... ${EXECDIR}/config.sh + ... ctrl + ... routes + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 + Should Not Contain ${result.stdout} FAILED diff --git a/tests/functional-tests/hicn-light-ping.robot b/tests/functional-tests/hicn-light-ping.robot index 238f49e0e..ba6beed42 100644 --- a/tests/functional-tests/hicn-light-ping.robot +++ b/tests/functional-tests/hicn-light-ping.robot @@ -1,26 +1,30 @@ *** Settings *** -Resource resources/libraries/robot/common.robot -Test Setup Run Keywords -... Build Topology 1-node AND -... 
Check Environment -Test Teardown Run Keywords -... Destroy Topology +Resource ../resources/libraries/robot/common.robot + +Test Setup Run Keywords +... Build Topology +... 1-node +... AND +... Check Environment +Test Teardown Run Keywords +... Destroy Topology + *** Test Cases *** Ping with manifest - Log to console Test ping with manifest - ${result} = Run Process ${EXECDIR}/config.sh ping manifest - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 + Log to console Test ping with manifest + ${result} = Run Process bash -x ${EXECDIR}/config.sh ping manifest + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 Ping wrong signature - Log to console Test ping with wrong signature - ${result} = Run Process ${EXECDIR}/config.sh ping signature - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 + Log to console Test ping with wrong signature + ${result} = Run Process bash -x ${EXECDIR}/config.sh ping signature + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 Ping timeout - Log to console Test ping timeout - ${result} = Run Process ${EXECDIR}/config.sh ping timeout - Log Many stdout: ${result.stdout} - Should Be Equal As Integers ${result.rc} 0 + Log to console Test ping timeout + ${result} = Run Process bash -x ${EXECDIR}/config.sh ping timeout + Log Many stdout: ${result.stdout} + Should Be Equal As Integers ${result.rc} 0 diff --git a/tests/resources/libraries/robot/common.robot b/tests/resources/libraries/robot/common.robot index 21d1ace1d..921d79b4d 100644 --- a/tests/resources/libraries/robot/common.robot +++ b/tests/resources/libraries/robot/common.robot @@ -1,25 +1,39 @@ *** Settings *** -Library OperatingSystem -Library Process -Library String +Library OperatingSystem +Library Process +Library String -*** Variables *** *** Keywords *** - Build Topology - [Arguments] ${TEST_TOPOLOGY}=${NONE} ${TEST_CONFIGURATION}=${NONE} - Log to console Building topology ${TEST_TOPOLOGY} ${TEST_CONFIGURATION} - ${result_setup} = Run Process ${EXECDIR}/config.sh build setup ${TEST_TOPOLOGY} ${TEST_CONFIGURATION} stdout=${TEMPDIR}/stdout.txt stderr=${TEMPDIR}/stderr.txt - Log to console Done - Log Many stdout: ${result_setup.stdout} stderr: ${result_setup.stderr} - Should Be Equal As Integers ${result_setup.rc} 0 + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_CONFIGURATION}=${NONE} + Log to console + ... Building topology ${TEST_TOPOLOGY} ${TEST_CONFIGURATION} + ${result_setup} = Run Process + ... ${EXECDIR}/config.sh + ... build + ... setup + ... ${TEST_TOPOLOGY} + ... ${TEST_CONFIGURATION} + ... stdout=${TEMPDIR}/stdout.txt + ... stderr=${TEMPDIR}/stderr.txt + Log to console Done + Log Many + ... stdout: ${result_setup.stdout} + ... stderr: ${result_setup.stderr} + Should Be Equal As Integers ${result_setup.rc} 0 Check Environment - ${result} = Run Process docker ps - Log Many stdout: ${result.stdout} stderr: ${result.stderr} + ${result} = Run Process docker ps + Log Many + ... stdout: ${result.stdout} + ... stderr: ${result.stderr} Destroy Topology - ${result_teardown} = Run Process ${EXECDIR}/config.sh stopall - Log Many stdout: ${result_teardown.stdout} stderr: ${result_teardown.stderr} - Should Be Equal As Integers ${result_teardown.rc} 0 + ${result_teardown} = Run Process ${EXECDIR}/config.sh stopall + Log Many + ... stdout: ${result_teardown.stdout} + ... 
stderr: ${result_teardown.stderr} + Should Be Equal As Integers ${result_teardown.rc} 0 diff --git a/tests/resources/libraries/robot/runtest.robot b/tests/resources/libraries/robot/runtest.robot index 9a3da8647..e6309a9ed 100644 --- a/tests/resources/libraries/robot/runtest.robot +++ b/tests/resources/libraries/robot/runtest.robot @@ -1,134 +1,293 @@ *** Settings *** -Library OperatingSystem -Library Process -Library String +Library OperatingSystem +Library Process +Library String -*** Variables *** *** Keywords *** - Infra ${VALUE} Run Process ${EXECDIR}/config.sh ${VALUE} Run Test - [Arguments] ${TEST_SETUP}=${NONE} ${TESTID}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - ${result_test} = Run Process ${EXECDIR}/config.sh start ${TEST_SETUP} ${TESTID} stdout=${TEMPDIR}/stdout.txt stderr=${TEMPDIR}/stderr.txt - Log Many stdout: ${result_test.stdout} stderr: ${result_test.stderr} - @{min_max_avg} = Split String ${result_test.stdout.strip()} - Log To Console Min Max Average Array: @{min_max_avg} - IF '${TESTID}' == 'rtc' - Should Be True ${min_max_avg}[0] == ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] != ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] == ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] != ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] == ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] != ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'requin' - Should Be True ${min_max_avg}[0] >= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] >= ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] >= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'requin-new' - Should Be True ${min_max_avg}[0] >= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] >= ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] >= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'latency' - Should Be True ${min_max_avg}[0] <= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] > ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] <= ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] > ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] <= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] > ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'latency-new' - Should Be True ${min_max_avg}[0] <= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] > ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] <= ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] > ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] <= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] > ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'cbr' - Should Be True ${min_max_avg}[0] >= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] >= ${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] >= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" - ELSE IF '${TESTID}' == 'cbr-new' - Should Be True ${min_max_avg}[0] >= ${EXPECTED_MIN} msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" - Should Be True ${min_max_avg}[1] >= 
${EXPECTED_MAX} msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" - Should Be True ${min_max_avg}[2] >= ${EXPECTED_AVG} msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${TESTID}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + ${result_test} = Run Process + ... ${EXECDIR}/config.sh + ... start + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... ${TESTID} + ... stdout=${TEMPDIR}/stdout.txt + ... stderr=${TEMPDIR}/stderr.txt + Log Many stdout: ${result_test.stdout} stderr: ${result_test.stderr} + ${min_max_avg_line} = Get Line ${result_test.stdout} 0 + @{min_max_avg} = Split String ${min_max_avg_line.strip()} + Log To Console Min Max Average Array: @{min_max_avg} + IF '${TESTID}' == 'rtc' + Should Be True + ... ${min_max_avg}[0] == ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] != ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] == ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] != ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] == ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] != ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'requin' + Should Be True + ... ${min_max_avg}[0] >= ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] >= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] >= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'requin-new-paket-format' + Should Be True + ... ${min_max_avg}[0] >= ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] >= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] >= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'latency' + Should Be True + ... ${min_max_avg}[0] <= ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] > ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] <= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] > ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] <= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] > ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'latency-new-paket-format' + Should Be True + ... ${min_max_avg}[0] <= ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] > ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] <= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] > ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] <= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] > ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'cbr' + Should Be True + ... ${min_max_avg}[0] >= ${EXPECTED_MIN} + ... msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] >= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] >= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" + ELSE IF '${TESTID}' == 'cbr-new-paket-format' + Should Be True + ... ${min_max_avg}[0] >= ${EXPECTED_MIN} + ... 
msg="Min does not match (${min_max_avg}[0] < ${EXPECTED_MIN})" + Should Be True + ... ${min_max_avg}[1] >= ${EXPECTED_MAX} + ... msg="Max does not match (${min_max_avg}[1] < ${EXPECTED_MAX})" + Should Be True + ... ${min_max_avg}[2] >= ${EXPECTED_AVG} + ... msg="Avg does not match (${min_max_avg}[2] < ${EXPECTED_AVG})" ELSE - Fail "Provided Test ID does not exist" + Fail "Provided Test ID does not exist" END Set Link - [Documentation] Configure link rate/delay/jitter/loss - ... Arguments: - ... ${RATE} Rate of the link - ... ${DELAY} Delay of the link - ... ${JITTER} Jitter of the link - ... ${LOSS} Loss of the link - [Arguments] ${TEST_SETUP}=${NONE} - ... ${RATE}=${NONE} - ... ${DELAY}=${NONE} - ... ${JITTER}=${NONE} - ... ${LOSS}=${NONE} - ${result_link} = Run Process ${EXECDIR}/config.sh setchannel ${TEST_SETUP} server ${RATE}-${DELAY}-${JITTER}-${LOSS} - Log Many stdout: ${result_link.stdout} stderr: ${result_link.stderr} + [Documentation] + ... Configure link rate/delay/jitter/loss + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${RATE} Rate of the link + ... ${DELAY} Delay of the link + ... ${JITTER} Jitter of the link + ... ${LOSS} Loss of the link + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${RATE}=${NONE} + ... ${DELAY}=${NONE} + ... ${JITTER}=${NONE} + ... ${LOSS}=${NONE} + ${result_link} = Run Process + ... ${EXECDIR}/config.sh + ... setchannel + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... server + ... eth0 + ... ${RATE}-${DELAY}-${JITTER}-${LOSS} + Log Many stdout: ${result_link.stdout} stderr: ${result_link.stderr} Run Latency Test - [Documentation] Run hicn-ping on the ${TEST_SETUP} topology and measure latency. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min latency - ... ${EXPECTED_MAX} The expected max latency - ... ${EXPECTED_AVG} The expected avg latency - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} latency ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} + [Documentation] + ... Run hicn-ping on the \${TEST_SETUP} topology and measure latency. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min latency + ... ${EXPECTED_MAX} The expected max latency + ... ${EXPECTED_AVG} The expected avg latency + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... latency + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} Run Throughput Test Raaqm - [Documentation] Run hiperf on the ${TEST_SETUP} topology and measure throughput. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min throughput - ... ${EXPECTED_MAX} The expected max throughput - ... ${EXPECTED_AVG} The expected avg throughput - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} requin ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} + [Documentation] + ... Run hiperf on the \${TEST_TOPOLOGY}-\${TEST_SETUP} topology + ... and measure throughput. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min throughput + ... 
${EXPECTED_MAX} The expected max throughput + ... ${EXPECTED_AVG} The expected avg throughput + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... requin + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} -Run Throughput Test Raaqm New - [Documentation] Run hiperf on the ${TEST_SETUP} topology and measure throughput. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min throughput - ... ${EXPECTED_MAX} The expected max throughput - ... ${EXPECTED_AVG} The expected avg throughput - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} requin ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} +Run Throughput Test Raaqm New Packet Format + [Documentation] + ... Run hiperf on the \${TEST_SETUP} topology and measure throughput. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min throughput + ... ${EXPECTED_MAX} The expected max throughput + ... ${EXPECTED_AVG} The expected avg throughput + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... requin-new-paket-format + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} Run Throughput Test CBR - [Documentation] Run hiperf on the ${TEST_SETUP} topology and measure throughput. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min throughput - ... ${EXPECTED_MAX} The expected max throughput - ... ${EXPECTED_AVG} The expected avg throughput - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} cbr ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} + [Documentation] + ... Run hiperf on the \${TEST_SETUP} topology and measure throughput. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min throughput + ... ${EXPECTED_MAX} The expected max throughput + ... ${EXPECTED_AVG} The expected avg throughput + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... cbr + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} -Run Throughput Test CBR New - [Documentation] Run hiperf on the ${TEST_SETUP} topology and measure throughput. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min throughput - ... ${EXPECTED_MAX} The expected max throughput - ... ${EXPECTED_AVG} The expected avg throughput - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} cbr ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} +Run Throughput Test CBR New Packet Format + [Documentation] + ... Run hiperf on the \${TEST_SETUP} topology and measure throughput. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... 
${EXPECTED_MIN} The expected min throughput + ... ${EXPECTED_MAX} The expected max throughput + ... ${EXPECTED_AVG} The expected avg throughput + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... cbr-new-paket-format + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} Run RTC Test - [Documentation] Run hiperf RTC on the ${TEST_SETUP} topology and check consumer syncs to producer bitrate. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min bitrate - ... ${EXPECTED_MAX} The expected max bitrate - ... ${EXPECTED_AVG} The expected avg bitrate - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} rtc ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} + [Documentation] + ... Run hiperf RTC on the \${TEST_SETUP} topology and check consumer syncs to + ... producer bitrate. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min bitrate + ... ${EXPECTED_MAX} The expected max bitrate + ... ${EXPECTED_AVG} The expected avg bitrate + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... rtc + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} -Run Latency Test New - [Documentation] Run hicn-ping on the ${TEST_SETUP} topology with the new packet format and measure latency. - ... Arguments: - ... ${TEST_SETUP} The setup of the test. - ... ${EXPECTED_MIN} The expected min latency - ... ${EXPECTED_MAX} The expected max latency - ... ${EXPECTED_AVG} The expected avg latency - [Arguments] ${TEST_SETUP}=${NONE} ${EXPECTED_MIN}=${NONE} ${EXPECTED_MAX}=${NONE} ${EXPECTED_AVG}=${NONE} - Run Test ${TEST_SETUP} latency-new ${EXPECTED_MIN} ${EXPECTED_MAX} ${EXPECTED_AVG} +Run Latency Test New Packet Format + [Documentation] + ... Run hicn-ping on the \${TEST_SETUP} topology with the new + ... packet format and measure latency. + ... Arguments: + ... ${TEST_TOPOLOGY} The topology of the test. + ... ${TEST_SETUP} The setup of the test. + ... ${EXPECTED_MIN} The expected min latency + ... ${EXPECTED_MAX} The expected max latency + ... ${EXPECTED_AVG} The expected avg latency + [Arguments] + ... ${TEST_TOPOLOGY}=${NONE} + ... ${TEST_SETUP}=${NONE} + ... ${EXPECTED_MIN}=${NONE} + ... ${EXPECTED_MAX}=${NONE} + ... ${EXPECTED_AVG}=${NONE} + Run Test + ... ${TEST_TOPOLOGY} + ... ${TEST_SETUP} + ... latency-new-paket-format + ... ${EXPECTED_MIN} + ... ${EXPECTED_MAX} + ... ${EXPECTED_AVG} diff --git a/tests/run-functional.sh b/tests/run-functional.sh index b8dfbc437..34515ac85 100644 --- a/tests/run-functional.sh +++ b/tests/run-functional.sh @@ -27,7 +27,7 @@ for t in functional-tests/*; do robot --NoStatusRC \ --outputdir report_"${test}" \ - -P ${PWD} functional-tests/"${test}" + functional-tests/"${test}" REPORTS+=(report_"${test}"/output.xml) done -- cgit 1.2.3-korg