-rwxr-xr-x  bootstrap-DMM.sh  435
-rw-r--r--  resources/libraries/python/DMM/DMMConstants.py  29
-rw-r--r--  resources/libraries/python/DMM/SetupDMMTest.py  183
-rw-r--r--  resources/libraries/python/DMM/SingleCliSer.py  113
-rw-r--r--  resources/libraries/python/DMM/__init__.py  16
-rw-r--r--  resources/libraries/robot/dmm/dmm_utils.robot  63
-rwxr-xr-x  tests/dmm/dmm_scripts/install_dmm.sh  128
-rwxr-xr-x  tests/dmm/dmm_scripts/run_dmm.sh  122
-rw-r--r--  tests/dmm/func/SingleCliSer-func.robot  37
-rw-r--r--  tests/dmm/func/__init__.robot  18
10 files changed, 1144 insertions, 0 deletions
diff --git a/bootstrap-DMM.sh b/bootstrap-DMM.sh
new file mode 100755
index 0000000000..fd212c51ef
--- /dev/null
+++ b/bootstrap-DMM.sh
@@ -0,0 +1,435 @@
+#!/bin/bash
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+
+cat /etc/hostname
+cat /etc/hosts
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+export PYTHONPATH=${SCRIPT_DIR}
+
+if [ -f "/etc/redhat-release" ]; then
+ DISTRO="CENTOS"
+ sudo yum install -y python-devel python-virtualenv
+ DPDK_ARTIFACTS=""
+ VIRL_TOPOLOGY=$(cat ${SCRIPT_DIR}/VIRL_TOPOLOGY_CENTOS)
+ VIRL_RELEASE=$(cat ${SCRIPT_DIR}/VIRL_RELEASE_CENTOS)
+else
+ DISTRO="UBUNTU"
+ export DEBIAN_FRONTEND=noninteractive
+ sudo apt-get -y update
+ sudo apt-get -y install libpython2.7-dev python-virtualenv
+ DPDK_ARTIFACTS=""
+ VIRL_TOPOLOGY=$(cat ${SCRIPT_DIR}/VIRL_TOPOLOGY_UBUNTU)
+ VIRL_RELEASE=$(cat ${SCRIPT_DIR}/VIRL_RELEASE_UBUNTU)
+fi
+
+VIRL_SERVERS=("10.30.51.28" "10.30.51.29" "10.30.51.30")
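+# Per-server resource limits, encoded as "<server-ip>:<limit>" and parsed by
+# the get_max_ip_nr()/get_max_sim_nr() helpers below.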
+IPS_PER_VIRL=( "10.30.51.28:252"
+ "10.30.51.29:252"
+ "10.30.51.30:252" )
+SIMS_PER_VIRL=( "10.30.51.28:13"
+ "10.30.51.29:13"
+ "10.30.51.30:13" )
+IPS_PER_SIMULATION=5
+
+function get_max_ip_nr() {
+ virl_server=$1
+ IP_VALUE="0"
+ for item in "${IPS_PER_VIRL[@]}" ; do
+ if [ "${item%%:*}" == "${virl_server}" ]
+ then
+ IP_VALUE=${item#*:}
+ break
+ fi
+ done
+ echo "$IP_VALUE"
+}
+
+function get_max_sim_nr() {
+ virl_server=$1
+ SIM_VALUE="0"
+ for item in "${SIMS_PER_VIRL[@]}" ; do
+ if [ "${item%%:*}" == "${virl_server}" ]
+ then
+ SIM_VALUE=${item#*:}
+ break
+ fi
+ done
+ echo "$SIM_VALUE"
+}
+
+VIRL_USERNAME=jenkins-in
+VIRL_PKEY=priv_key
+VIRL_SERVER_STATUS_FILE="status"
+VIRL_SERVER_EXPECTED_STATUS="PRODUCTION"
+
+SSH_OPTIONS="-i ${VIRL_PKEY} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o LogLevel=error"
+
+TEST_GROUPS=("func")
+SUITE_PATH="tests.dmm"
+SKIP_PATCH="SKIP_PATCH"
+
+# Create tmp dir
+mkdir ${SCRIPT_DIR}/tmp
+
+# Use tmp dir to store log files
+LOG_PATH="${SCRIPT_DIR}/tmp"
+
+# Use tmp dir for tarballs
+export TMPDIR="${SCRIPT_DIR}/tmp"
+
+function ssh_do() {
+ echo
+ echo "### " ssh $@
+ ssh ${SSH_OPTIONS} $@
+}
+
+rm -f ${VIRL_PKEY}
+cat > ${VIRL_PKEY} <<EOF
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA+IHXq87GcqMR1C47rzx6Cbip5Ghq8pKrbqKrP5Nf41HcYrT6
+GOXl9nFWKsMOzIlIn+8y7Il27eZh7csQGApbg8QLiHMtcYEmWNzKZpkqg4nuAPxX
+VXwlKgnKX902SrET9Gp9TDayiHtCRWVfrlPPPSA0UEXW6BjLN/uHJ+W/Xzrrab+9
+asBVa05vT2W6n0KJ66zfCaeDM912mQ6SttscAwFoWDmdHlegiVqrlIG2ABxOvxxz
+L3dM3iSmlmQlzv9bThjo+nI4KFYh6m5wrZmAo5r/4q9CIJc21HVnTqkGOWJIZz6J
+73lePJVSq5gYqaoGw3swFEA/MDkOx7baWKSoLQIDAQABAoIBAQCNBeolNp+JWJ76
+gQ4fwLsknyXSV6sxYyhkDW4PEwwcTU06uqce0AAzXVffxne0fMe48x47+zqBgPbb
+4huM+Pu8B9nfojUMr5TaYtl9Zbgpk3F8H7dT7LKOa6XrxvZTZrADSRc30+Z26zPN
+e9zTaf42Gvt0/l0Zs1BHwbaOXqO+XuwJ3/F9Sf3PQYWXD3EOWjpHDP/X/1vAs6lV
+SLkm6J/9KKE1m6I6LTYjIXuYt4SXybW6N2TSy54hhQtYcDUnIU2hR/PHVWKrGA0J
+kELgrtTNTdbML27O5gFWU4PLUEYTZ9fN11D6qUZKxLcPOiPPHXkiILMRCCnG5DYI
+ksBAU/YlAoGBAPxZO9VO18TYc8THV1nLKcvT2+1oSs1UcA2wNQMU55t910ZYinRa
+MRwUhMOf8Mv5wOeiZaRICQB1PnVWtDVmGECgPpK6jUxqAwn8rgJcnoafLGL5YKMY
+RVafTe6N5LXgCaOcJrk21wxs6v7ninEbUxxc575urOvZMBkymDw91dwbAoGBAPwa
+YRhKhrzFKZzdK0RadVjnxKvolUllpoqqg3XuvmeAJHAOAnaOgVWq68NAcp5FZJv0
+2D2Up7TX8pjf9MofP1SJbcraKBpK4NzfNkA0dSdEi+FhVofAJ9umB2o5LW1n7sab
+UIrjsdzSJK/9Zb9yTTHPyibYzNEgaJV1HsbxfEFXAoGAYO2RmvRm0phll18OQVJV
+IpKk9kLKAKZ/R/K32hAsikBC8SVPQTPniyaifFWx81diblalff2hX4ipTf7Yx24I
+wMIMZuW7Im/R7QMef4+94G3Bad7p7JuE/qnAEHJ2OBnu+eYfxaK35XDsrq6XMazS
+NqHE7hOq3giVfgg+C12hCKMCgYEAtu9dbYcG5owbehxzfRI2/OCRsjz/t1bv1seM
+xVMND4XI6xb/apBWAZgZpIFrqrWoIBM3ptfsKipZe91ngBPUnL9s0Dolx452RVAj
+yctHB8uRxWYgqDkjsxtzXf1HnZBBkBS8CUzYj+hdfuddoeKLaY3invXLCiV+PpXS
+U4KAK9kCgYEAtSv0m5+Fg74BbAiFB6kCh11FYkW94YI6B/E2D/uVTD5dJhyEUFgZ
+cWsudXjMki8734WSpMBqBp/J8wG3C9ZS6IpQD+U7UXA+roB7Qr+j4TqtWfM+87Rh
+maOpG56uAyR0w5Z9BhwzA3VakibVk9KwDgZ29WtKFzuATLFnOtCS46E=
+-----END RSA PRIVATE KEY-----
+EOF
+chmod 600 ${VIRL_PKEY}
+
+#
+# The server must be reachable and have a "status" file with
+# the content "PRODUCTION" to be selected.
+#
+# Servers that are unreachable or that report a different status
+# are left out of the candidate array.
+#
+# Abort if there are no more servers left in the array.
+#
+VIRL_PROD_SERVERS=()
+for index in "${!VIRL_SERVERS[@]}"; do
+ virl_server_status=$(ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${VIRL_SERVERS[$index]} cat $VIRL_SERVER_STATUS_FILE 2>&1)
+ echo VIRL HOST ${VIRL_SERVERS[$index]} status is \"$virl_server_status\"
+ if [ "$virl_server_status" == "$VIRL_SERVER_EXPECTED_STATUS" ]
+ then
+ # Candidate is in good status. Add to array.
+ VIRL_PROD_SERVERS+=(${VIRL_SERVERS[$index]})
+ fi
+done
+
+VIRL_SERVERS=("${VIRL_PROD_SERVERS[@]}")
+echo "VIRL servers in production: ${VIRL_SERVERS[@]}"
+num_hosts=${#VIRL_SERVERS[@]}
+if [ $num_hosts == 0 ]
+then
+ echo "No more VIRL candidate hosts available, failing."
+ exit 127
+fi
+
+# Get the LOAD of each server based on number of active simulations (testcases)
+VIRL_SERVER_LOAD=()
+for index in "${!VIRL_SERVERS[@]}"; do
+ VIRL_SERVER_LOAD[${index}]=$(ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${VIRL_SERVERS[$index]} "list-testcases | grep session | wc -l")
+done
+
+# Pick the least loaded server for each TEST_GROUP
+VIRL_SERVER=()
+for index in "${!TEST_GROUPS[@]}"; do
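+    # Number each load value, sort numerically and take the 1-based index of
+    # the smallest one.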
+ least_load_server_idx=$(echo "${VIRL_SERVER_LOAD[*]}" | tr -s ' ' '\n' | awk '{print($0" "NR)}' | sort -g -k1,1 | head -1 | cut -f2 -d' ')
+ least_load_server=${VIRL_SERVERS[$least_load_server_idx-1]}
+ VIRL_SERVER+=($least_load_server)
+    # Adjust the load, as we are not going to run the simulation immediately
+ VIRL_SERVER_LOAD[$least_load_server_idx-1]=$((VIRL_SERVER_LOAD[$least_load_server_idx-1]+1))
+done
+
+echo "Selected VIRL servers: ${VIRL_SERVER[@]}"
+
+# Temporarily download DPDK packages
+DMM_TAR_FILE="dmm_depends.tar.gz"
+
+cd dmm/scripts/
+./build.sh
+cd -
+
+mv /tmp/dpdk/dpdk-18.02.tar.xz .
+
+wget http://security.ubuntu.com/ubuntu/pool/main/n/numactl/libnuma1_2.0.11-1ubuntu1.1_amd64.deb
+wget http://security.ubuntu.com/ubuntu/pool/main/n/numactl/libnuma-dev_2.0.11-1ubuntu1.1_amd64.deb
+
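+# Bundle the DPDK sources, the DMM tree and the libnuma packages for upload
+# to the VIRL hosts.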
+tar zcf ${DMM_TAR_FILE} dpdk-18.02.tar.xz ./dmm/ libnuma*.deb
+
+VIRL_DIR_LOC="/tmp"
+
+cat ${VIRL_PKEY}
+
+# Copy the files to VIRL hosts
+DONE=""
+for index in "${!VIRL_SERVER[@]}"; do
+ # Do not copy files in case they have already been copied to the VIRL host
+ [[ "${DONE[@]}" =~ "${VIRL_SERVER[${index}]}" ]] && copy=0 || copy=1
+
+ if [ "${copy}" -eq "0" ]; then
+ echo "DMM_TAR_FILE has already been copied to the VIRL host ${VIRL_SERVER[${index}]}"
+ else
+ scp ${SSH_OPTIONS} ${DMM_TAR_FILE} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}:${VIRL_DIR_LOC}/
+
+ result=$?
+ if [ "${result}" -ne "0" ]; then
+ echo "Failed to copy DMM_TAR_FILE to VIRL host ${VIRL_SERVER[${index}]}"
+ echo ${result}
+ exit ${result}
+ else
+ echo "DMM_TAR_FILE successfully copied to the VIRL host ${VIRL_SERVER[${index}]}"
+ fi
+ DONE+=(${VIRL_SERVER[${index}]})
+ fi
+done
+
+# Start a simulation on VIRL server
+
+function stop_virl_simulation {
+ for index in "${!VIRL_SERVER[@]}"; do
+ ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}\
+ "stop-testcase ${VIRL_SID[${index}]}"
+ done
+}
+
+# Upon script exit, clean up the running simulations
+trap stop_virl_simulation EXIT
+
+for index in "${!VIRL_SERVER[@]}"; do
+ echo "Starting simulation nr. ${index} on VIRL server ${VIRL_SERVER[${index}]}"
+ # Get given VIRL server limits for max. number of VMs and IPs
+ max_ips=$(get_max_ip_nr ${VIRL_SERVER[${index}]})
+ max_ips_from_sims=$(($(get_max_sim_nr ${VIRL_SERVER[${index}]})*IPS_PER_SIMULATION))
+    # Set the quota to the lower of the two limits
+ IP_QUOTA=$([ $max_ips -le $max_ips_from_sims ] && echo "$max_ips" || echo "$max_ips_from_sims")
+ # Start the simulation
+ VIRL_SID[${index}]=$(ssh ${SSH_OPTIONS} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]} \
+ "start-testcase-DMM -vv --quota ${IP_QUOTA} --copy ${VIRL_TOPOLOGY} \
+ --release ${VIRL_RELEASE} ${VIRL_DIR_LOC}/${DMM_TAR_FILE}")
+ # TODO: remove param ${DMM_TAR_FILE} when start-testcase script is
+ # updated on all virl servers
+ retval=$?
+ if [ ${retval} -ne "0" ]; then
+ echo "VIRL simulation start failed on ${VIRL_SERVER[${index}]}"
+ exit ${retval}
+ fi
+ if [[ ! "${VIRL_SID[${index}]}" =~ session-[a-zA-Z0-9_]{6} ]]; then
+ echo "No VIRL session ID reported."
+ exit 127
+ fi
+ echo "VIRL simulation nr. ${index} started on ${VIRL_SERVER[${index}]}"
+
+ ssh_do ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}\
+ cat /scratch/${VIRL_SID[${index}]}/topology.yaml
+
+ # Download the topology file from VIRL session and rename it
+ scp ${SSH_OPTIONS} \
+ ${VIRL_USERNAME}@${VIRL_SERVER[${index}]}:/scratch/${VIRL_SID[${index}]}/topology.yaml \
+ topologies/enabled/topology${index}.yaml
+
+ retval=$?
+ if [ ${retval} -ne "0" ]; then
+ echo "Failed to copy topology file from VIRL simulation nr. ${index} on VIRL server ${VIRL_SERVER[${index}]}"
+ exit ${retval}
+ fi
+done
+
+echo ${VIRL_SID[@]}
+
+virtualenv --system-site-packages env
+. env/bin/activate
+
+echo pip install
+pip install -r ${SCRIPT_DIR}/requirements.txt
+
+for index in "${!VIRL_SERVER[@]}"; do
+ pykwalify -s ${SCRIPT_DIR}/resources/topology_schemas/3_node_topology.sch.yaml \
+ -s ${SCRIPT_DIR}/resources/topology_schemas/topology.sch.yaml \
+ -d ${SCRIPT_DIR}/topologies/enabled/topology${index}.yaml \
+ -vvv
+ if [ "$?" -ne "0" ]; then
+ echo "Topology${index} schema validation failed."
+ echo "However, the tests will start."
+ fi
+done
+
+function run_test_set() {
+ set +x
+ OLDIFS=$IFS
+ IFS=","
+ nr=$(echo $1)
+ rm -f ${LOG_PATH}/test_run${nr}.log
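+    # Prefix every line of output with a timestamp and append it to the
+    # per-run log file.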
+ exec &> >(while read line; do echo "$(date +'%H:%M:%S') $line" \
+ >> ${LOG_PATH}/test_run${nr}.log; done;)
+ suite_str=""
+ for suite in ${TEST_GROUPS[${nr}]}; do
+ suite_str="${suite_str} --suite ${SUITE_PATH}.${suite}"
+ done
+ IFS=$OLDIFS
+
+ echo "PYTHONPATH=`pwd` pybot -L TRACE -W 136\
+ -v TOPOLOGY_PATH:${SCRIPT_DIR}/topologies/enabled/topology${nr}.yaml \
+ ${suite_str} \
+ --include vm_envAND3_node_single_link_topo \
+ --include vm_envAND3_node_double_link_topo \
+ --exclude PERFTEST \
+ --exclude ${SKIP_PATCH} \
+ --noncritical EXPECTED_FAILING \
+ --output ${LOG_PATH}/log_test_set_run${nr} \
+ tests/"
+
+ PYTHONPATH=`pwd` pybot -L TRACE -W 136\
+ -v TOPOLOGY_PATH:${SCRIPT_DIR}/topologies/enabled/topology${nr}.yaml \
+ ${suite_str} \
+ --include vm_envAND3_node_single_link_topo \
+ --include vm_envAND3_node_double_link_topo \
+ --exclude PERFTEST \
+ --exclude ${SKIP_PATCH} \
+ --noncritical EXPECTED_FAILING \
+ --output ${LOG_PATH}/log_test_set_run${nr} \
+ tests/
+
+ local local_run_rc=$?
+ set -x
+ echo ${local_run_rc} > ${LOG_PATH}/rc_test_run${nr}
+}
+
+set +x
+# Send to background an instance of the run_test_set() function for each number,
+# record the pid.
+for index in "${!VIRL_SERVER[@]}"; do
+ run_test_set ${index} &
+ pid=$!
+ echo "Sent to background: Test_set${index} (pid=$pid)"
+ pids[$pid]=$index
+done
+
+echo
+echo -n "Waiting..."
+
+# Watch the stable of background processes.
+# If a pid goes away, remove it from the array.
+while [ -n "${pids[*]}" ]; do
+ for i in $(seq 0 9); do
+ sleep 1
+ echo -n "."
+ done
+ for pid in "${!pids[@]}"; do
+ if ! ps "$pid" >/dev/null; then
+ echo -e "\n"
+ echo "Test_set${pids[$pid]} with PID $pid finished."
+ unset pids[$pid]
+ fi
+ done
+ if [ -z "${!pids[*]}" ]; then
+ break
+ fi
+ echo -n -e "\nStill waiting for test set(s): ${pids[*]} ..."
+done
+
+echo
+echo "All test set runs finished."
+echo
+
+set -x
+
+RC=0
+for index in "${!VIRL_SERVER[@]}"; do
+ echo "Test_set${index} log:"
+ cat ${LOG_PATH}/test_run${index}.log
+ RC_PARTIAL_RUN=$(cat ${LOG_PATH}/rc_test_run${index})
+ if [ -z "$RC_PARTIAL_RUN" ]; then
+ echo "Failed to retrieve return code from test run ${index}"
+ exit 1
+ fi
+ RC=$((RC+RC_PARTIAL_RUN))
+ rm -f ${LOG_PATH}/rc_test_run${index}
+ rm -f ${LOG_PATH}/test_run${index}.log
+ echo
+done
+
+# Log the final result
+if [ "${RC}" -eq "0" ]; then
+ set +x
+ echo
+ echo "========================================================================================================================================"
+ echo "Final result of all test loops: | PASS |"
+ echo "All critical tests have passed."
+ echo "========================================================================================================================================"
+ echo
+ set -x
+else
+ if [ "${RC}" -eq "1" ]; then
+ HLP_STR="test has"
+ else
+ HLP_STR="tests have"
+ fi
+ set +x
+ echo
+ echo "========================================================================================================================================"
+ echo "Final result of all test loops: | FAIL |"
+ echo "${RC} critical ${HLP_STR} failed."
+ echo "========================================================================================================================================"
+ echo
+ set -x
+fi
+
+echo Post-processing test data...
+
+partial_logs=""
+for index in "${!VIRL_SERVER[@]}"; do
+ partial_logs="${partial_logs} ${LOG_PATH}/log_test_set_run${index}.xml"
+done
+
+# Rebot output post-processing
+rebot --noncritical EXPECTED_FAILING \
+ --output output.xml ${partial_logs}
+
+# Remove unnecessary log files
+rm -f ${partial_logs}
+
+echo Post-processing finished.
+
+if [ ${RC} -eq 0 ]; then
+ RETURN_STATUS=0
+else
+ RETURN_STATUS=1
+fi
+
+exit ${RETURN_STATUS}
diff --git a/resources/libraries/python/DMM/DMMConstants.py b/resources/libraries/python/DMM/DMMConstants.py
new file mode 100644
index 0000000000..ec6f192bf3
--- /dev/null
+++ b/resources/libraries/python/DMM/DMMConstants.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This file defines the constants variables for the DMM test."""
+
+class DMMConstants(object):
+ """Define the directory path for the DMM test."""
+
+ # DMM testing directory location at topology nodes
+ REMOTE_FW_DIR = '/tmp/DMM-testing'
+
+ # Shell scripts location
+ DMM_SCRIPTS = 'tests/dmm/dmm_scripts'
+
+ # Libraries location
+ DMM_DEPLIBS = 'tests/dmm/dmm_deplibs'
+
+ # Config files location for the DMM test
+ DMM_TESTCONFIG = 'tests/dmm/dmm_testconfig'
diff --git a/resources/libraries/python/DMM/SetupDMMTest.py b/resources/libraries/python/DMM/SetupDMMTest.py
new file mode 100644
index 0000000000..7d219fcfd4
--- /dev/null
+++ b/resources/libraries/python/DMM/SetupDMMTest.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This module exists to provide setup utilities for the framework on topology
+nodes. All tasks that need to run before the actual tests start are
+supposed to end up here.
+"""
+
+from shlex import split
+from subprocess import Popen, PIPE
+from multiprocessing import Pool
+from tempfile import NamedTemporaryFile
+from os.path import basename
+
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.DMM.DMMConstants import DMMConstants as con
+from resources.libraries.python.topology import NodeType, Topology
+from resources.libraries.python.TLDK.SetupTLDKTest import copy_tarball_to_node,\
+ delete_local_tarball
+
+__all__ = ["SetupDMMTest"]
+
+
+def pack_framework_dir():
+ """Pack the testing WS into temp file, return its name.
+ :returns: file_name
+ :rtype: str
+ :raises RuntimeError: If pack the testing framework failed.
+ """
+ tmpfile = NamedTemporaryFile(suffix=".tgz", prefix="DMM-testing-")
+ file_name = tmpfile.name
+ tmpfile.close()
+
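+    # Archive the whole working directory, skipping VCS metadata, ./tmp and
+    # the dependency tarball.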
+ proc = Popen(
+ split("tar --exclude-vcs --exclude=./tmp --exclude=dmm_depends.tar.gz"
+ " -zcf {0} .".format(file_name)), stdout=PIPE, stderr=PIPE)
+ (stdout, stderr) = proc.communicate()
+
+ logger.debug(stdout)
+ logger.debug(stderr)
+
+ return_code = proc.wait()
+ if return_code != 0:
+ raise RuntimeError("Could not pack testing framework.")
+
+ return file_name
+
+def extract_tarball_at_node(tarball, node):
+ """Extract tarball at given node.
+
+ Extracts tarball using tar on given node to specific CSIT location.
+    Raises a runtime error on failure.
+
+ :param tarball: Path to tarball to upload.
+ :param node: Dictionary created from topology.
+ :type tarball: str
+ :type node: dict
+ :return: nothing
+    :raises RuntimeError: If extracting the tarball fails.
+ """
+ logger.console('Extracting tarball to {0} on {1}'.format(
+ con.REMOTE_FW_DIR, node['host']))
+ ssh = SSH()
+ ssh.connect(node)
+
+ cmd = 'sudo rm -rf {1}; mkdir {1} ; tar -zxf {0} -C {1}; ' \
+ 'rm -f {0};'.format(tarball, con.REMOTE_FW_DIR)
+ (ret_code, _, stderr) = ssh.exec_command(cmd, timeout=30)
+ if ret_code != 0:
+ logger.error('Unpack error: {0}'.format(stderr))
+ raise RuntimeError('Failed to unpack {0} at node {1}'.format(
+ tarball, node['host']))
+
+def install_dmm_test(node):
+ """Prepare the DMM test envrionment.
+ Raise errors when failed.
+
+ :param node: Dictionary created from topology.
+ :type node: dict
+ :returns: nothing.
+    :raises RuntimeError: If the DMM installation fails.
+ """
+
+ arch = Topology.get_node_arch(node)
+ logger.console('Install the DMM on {0} ({1})'.format(node['host'], arch))
+
+ ssh = SSH()
+ ssh.connect(node)
+ (ret_code, _, stderr) = ssh.exec_command(
+ 'cd {0}/{1} && ./install_dmm.sh {2} 2>&1 | tee log_install_dmm.txt'
+ .format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS, arch), timeout=600)
+
+ if ret_code != 0:
+ logger.error('Install the DMM error: {0}'.format(stderr))
+ raise RuntimeError('Install the DMM failed')
+ else:
+ logger.console('Install the DMM on {0} success!'.format(node['host']))
+
+def setup_node(args):
+ """Run all set-up methods for a node.
+
+ This method is used as map_async parameter. It receives tuple with all
+ parameters as passed to map_async function.
+
+ :param args: All parameters needed to setup one node.
+ :type args: tuple
+ :returns: True - success, False - error
+ :rtype: bool
+ :raises RuntimeError: If node setup failed.
+ """
+ tarball, remote_tarball, node = args
+
+ # if unset, arch defaults to x86_64
+ Topology.get_node_arch(node)
+
+ try:
+ if node['type'] == NodeType.DUT:
+ copy_tarball_to_node(tarball, node)
+ extract_tarball_at_node(remote_tarball, node)
+ install_dmm_test(node)
+ except RuntimeError as exc:
+ logger.error("Node setup failed, error:'{0}'".format(exc.message))
+ return False
+ else:
+ logger.console('Setup of node {0} done'.format(node['host']))
+ return True
+
+class SetupDMMTest(object):
+ """Setup suite run on topology nodes.
+
+ Many VAT/CLI based tests need the scripts at remote hosts before executing
+ them. This class packs the whole testing directory and copies it over
+ to all nodes in topology under /tmp/
+ """
+
+ @staticmethod
+ def setup_dmm_test(nodes):
+ """Pack the whole directory and extract in temp on each node."""
+
+ tarball = pack_framework_dir()
+ msg = 'Framework packed to {0}'.format(tarball)
+ logger.console(msg)
+ logger.trace(msg)
+ remote_tarball = "/tmp/{0}".format(basename(tarball))
+
+ # Turn off logging since we use multiprocessing.
+ log_level = BuiltIn().set_log_level('NONE')
+ params = ((tarball, remote_tarball, node) for node in nodes.values())
+ pool = Pool(processes=len(nodes))
+ result = pool.map_async(setup_node, params)
+ pool.close()
+ pool.join()
+
+ # Turn on logging.
+ BuiltIn().set_log_level(log_level)
+
+ logger.info(
+ 'Executed node setups in parallel, waiting for processes to end')
+ result.wait()
+
+ results = result.get()
+ node_setup_success = all(results)
+ logger.info('Results: {0}'.format(results))
+
+ logger.trace('Test framework copied to all topology nodes')
+ delete_local_tarball(tarball)
+ if node_setup_success:
+ logger.console('All nodes are ready')
+ else:
+            logger.console('Failed to setup DMM on all the nodes')
diff --git a/resources/libraries/python/DMM/SingleCliSer.py b/resources/libraries/python/DMM/SingleCliSer.py
new file mode 100644
index 0000000000..504f59c415
--- /dev/null
+++ b/resources/libraries/python/DMM/SingleCliSer.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This module exists to provide the vs_epoll ping test for DMM on topology nodes.
+"""
+import time
+
+from resources.libraries.python.ssh import SSH
+from resources.libraries.python.DMM.DMMConstants import DMMConstants as con
+from resources.libraries.python.topology import Topology
+from robot.api import logger
+
+class SingleCliSer(object):
+ """Test the DMM vs_epoll ping function."""
+
+ @staticmethod
+ def exec_the_base_vs_epoll_test(dut1_node, dut2_node):
+ """Execute the vs_epoll on the dut1_node.
+
+        :param dut1_node: Node that runs the vs_epoll server side.
+        :param dut2_node: Node that runs the client side (vc_common).
+ :type dut1_node: dict
+ :type dut2_node: dict
+        :returns: Index of "send 50000" in the client output, -1 if not found.
+        :raises RuntimeError: If the vs_epoll test fails to execute on a node.
+ """
+ dut1_ip = Topology.get_node_hostname(dut1_node)
+ dut2_ip = Topology.get_node_hostname(dut2_node)
+
+ ssh = SSH()
+ ssh.connect(dut1_node)
+
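+        # The last run_dmm.sh argument selects the role: 0 starts the server
+        # (vs_epoll) on DUT1, detached in the background.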
+ cmd = 'cd {0}/{1} && ./run_dmm.sh {2} {3} {4} ' \
+ .format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS, dut1_ip, dut2_ip, 0)
+
+ cmd += '2>&1 | tee log_run_dmm.txt &'
+
+ (ret_code, _, _) = ssh.exec_command(cmd, timeout=6000)
+ if ret_code != 0:
+ raise RuntimeError('Failed to execute vs_epoll test at node {0}'
+ .format(dut1_node['host']))
+
+ time.sleep(10)
+
+ ssh = SSH()
+ ssh.connect(dut2_node)
+
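+        # Role argument 1 starts the client side (vc_common) on DUT2.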
+ cmd = 'cd {0}/{1} && ./run_dmm.sh {2} {3} {4} ' \
+ .format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS, dut1_ip, dut2_ip, 1)
+
+ cmd += '2>&1 | tee log_run_dmm.txt'
+
+ (ret_code, stdout_cli, _) = ssh.exec_command(cmd, timeout=6000)
+ if ret_code != 0:
+            raise RuntimeError('Failed to execute vs_epoll test at node {0}'
+                               .format(dut2_node['host']))
+
+ return stdout_cli.find("send 50000")
+
+ @staticmethod
+ def get_the_test_result(dut_node):
+ """
+        After executing exec_the_base_vs_epoll_test, use this
+        to get the test result.
+
+        :param dut_node: Node from which the test result is collected.
+        :type dut_node: dict
+        :returns: Number of "send 50000" occurrences in the DMM run log.
+        :rtype: str
+        :raises RuntimeError: If the test result cannot be retrieved.
+ """
+ ssh = SSH()
+ ssh.connect(dut_node)
+ cmd = 'cat {0}/{1}/log_run_dmm.txt | grep "send 50000" | wc -l' \
+ .format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS)
+
+ (ret_code, stdout, _) = ssh.exec_command(cmd, timeout=100)
+ if ret_code != 0:
+ raise RuntimeError('Failed to get test result at node {0}'
+ .format(dut_node['host']))
+
+ return stdout
+
+ @staticmethod
+ def echo_dmm_logs(dut_node):
+ """
+ :param dut_node:
+ :return:
+ """
+ ssh = SSH()
+ ssh.connect(dut_node)
+ cmd = 'cat {0}/{1}/log_install_dmm.txt' \
+ .format(con.REMOTE_FW_DIR, con.DMM_SCRIPTS)
+
+ (ret_code, stdout, _) = ssh.exec_command(cmd, timeout=100)
+ if ret_code != 0:
+ raise RuntimeError('Failed to get log_install_dmm at node {0}'
+ .format(dut_node['host']))
+ else:
+ logger.console('....log_install_dmm on node {1}.... {0}'
+ .format(stdout, dut_node['host']))
diff --git a/resources/libraries/python/DMM/__init__.py b/resources/libraries/python/DMM/__init__.py
new file mode 100644
index 0000000000..d7abcb27b1
--- /dev/null
+++ b/resources/libraries/python/DMM/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+__init__ file for directory resources/libraries/python/DMM
+"""
diff --git a/resources/libraries/robot/dmm/dmm_utils.robot b/resources/libraries/robot/dmm/dmm_utils.robot
new file mode 100644
index 0000000000..24931768fb
--- /dev/null
+++ b/resources/libraries/robot/dmm/dmm_utils.robot
@@ -0,0 +1,63 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Library | resources.libraries.python.NodePath
+| Documentation | *Utilities for path computation and port selection.*
+| ...
+| ... | Utilities for computing the DUT1 - DUT2 path and selecting the ports
+| ... | used by the test.
+
+*** Keywords ***
+| Path for 2-node testing is set
+| | [Documentation] | Compute the path for the 2 node testing.
+| | ...
+| | ... | *Arguments:*
+| | ... | - dut1_node - DUT1 node. Type: dictionary
+| | ... | - dut2_node - DUT2 node. Type: dictionary
+| | ...
+| | ... | *Return:*
+| | ... | - No value returned.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Path for 2-node testing is set \| ${nodes['DUT1']} \
+| | ... | \| ${nodes['DUT2']} \|
+| | ...
+| | [Arguments] | ${dut1_node} | ${dut2_node}
+| | Append Nodes | ${dut1_node} | ${dut2_node}
+| | Compute Path
+
+| Pick out the port used to execute test
+| | [Documentation] | Pick out the port used to execute the test.
+| | ...
+| | ... | *Arguments:*
+| | ... | - No arguments.
+| | ...
+| | ... | *Return:*
+| | ... | - No value returned.
+| | ...
+| | ... | *Example:*
+| | ...
+| | ... | \| Pick out the port used to execute test \|
+| | ...
+| | ${tg_port} | ${tg_node}= | First Interface
+| | ${dut1_port} | ${dut1_node}= | Next Interface
+| | ${dut2_port} | ${dut2_node}= | Last Interface
+| | Set Suite Variable | ${tg_node}
+| | Set Suite Variable | ${dut1_node}
+| | Set Suite Variable | ${dut2_node}
+| | Set Suite Variable | ${tg_port}
+| | Set Suite Variable | ${dut1_port}
+| | Set Suite Variable | ${dut2_port}
+
diff --git a/tests/dmm/dmm_scripts/install_dmm.sh b/tests/dmm/dmm_scripts/install_dmm.sh
new file mode 100755
index 0000000000..d6b7a862eb
--- /dev/null
+++ b/tests/dmm/dmm_scripts/install_dmm.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+set -x
+
+TIMESTAMP=$(date +%Y-%m-%d_%H-%M-%S)
+OS_ID=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
+ROOTDIR=/tmp/DMM-testing
+
+DMM_DIR=${ROOTDIR}/dmm/
+
+#DPDK download path
+DPDK_DOWNLOAD_PATH=/tmp/dpdk
+
+#dpdk installation path
+DPDK_INSTALL_PATH=/usr
+
+# compile and install the DPDK
+echo "DPDK build started....."
+cd ${ROOTDIR}
+chmod +x *.deb
+sudo dpkg -i libnuma1_2.0.11-1ubuntu1.1_amd64.deb
+sudo dpkg -i libnuma-dev_2.0.11-1ubuntu1.1_amd64.deb
+
+# DPDK depends on the Linux kernel headers
+if [ "$OS_ID" == "ubuntu" ]; then
+    sudo apt-get -y install git build-essential linux-headers-$(uname -r)
+    sudo apt-get -y install libnuma-dev
+elif [ "$OS_ID" == "debian" ]; then
+    sudo apt-get -y install git build-essential linux-headers-$(uname -r)
+elif [ "$OS_ID" == "centos" ]; then
+ sudo yum groupinstall -y "Development Tools"
+ sudo yum install -y kernel-headers
+elif [ "$OS_ID" == "opensuse" ]; then
+    sudo zypper install -y -t pattern devel_basis
+    sudo zypper install -y kernel-devel
+fi
+
+#===========build DPDK================
+mkdir -p $DPDK_DOWNLOAD_PATH
+
+DPDK_FOLDER=$DPDK_DOWNLOAD_PATH/dpdk-18.02-$TIMESTAMP
+cd $DPDK_DOWNLOAD_PATH
+mkdir $DPDK_FOLDER
+tar xvf /tmp/DMM-testing/dpdk-18.02.tar.xz -C $DPDK_FOLDER
+cd $DPDK_FOLDER/dpdk-18.02
+
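+# Tune the DPDK build configuration: enable the EAL, build shared libraries
+# and point the EAL PMD path at the drivers directory prepared below.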
+sed -i 's!CONFIG_RTE_EXEC_ENV=.*!CONFIG_RTE_EXEC_ENV=y!1' config/common_base
+sed -i 's!CONFIG_RTE_BUILD_SHARED_LIB=.*!CONFIG_RTE_BUILD_SHARED_LIB=y!1' config/common_base
+sed -i 's!CONFIG_RTE_LIBRTE_EAL=.*!CONFIG_RTE_LIBRTE_EAL=y!1' config/common_base
+sed -i 's!CONFIG_RTE_EAL_PMD_PATH=.*!CONFIG_RTE_EAL_PMD_PATH="/tmp/dpdk/drivers/"!1' config/common_base
+
+sudo make install T=x86_64-native-linuxapp-gcc DESTDIR=${DPDK_INSTALL_PATH} -j 4
+if [ $? -eq 0 ]
+then
+ echo "DPDK build is SUCCESS"
+else
+ echo "DPDK build has FAILED"
+ exit 1
+fi
+
+mkdir -p /tmp/dpdk/drivers/
+cp -f /usr/lib/librte_mempool_ring.so /tmp/dpdk/drivers/
+
+export NSTACK_LOG_ON=DBG
+
+# Try to kill the vs_epoll
+sudo killall vs_epoll
+
+sudo pgrep vs_epoll
+if [ $? -eq "0" ]; then
+ success=false
+ sudo pkill vs_epoll
+ echo "RC = $?"
+ for attempt in {1..5}; do
+ echo "Checking if vs_epoll is still alive, attempt nr ${attempt}"
+ sudo pgrep vs_epoll
+ if [ $? -eq "1" ]; then
+ echo "vs_epoll is dead"
+ success=true
+ break
+ fi
+ echo "vs_epoll is still alive, waiting 1 second"
+ sleep 1
+ done
+ if [ "$success" = false ]; then
+ echo "The command sudo pkill vs_epoll failed"
+ sudo pkill -9 vs_epoll
+ echo "RC = $?"
+ exit 1
+ fi
+else
+ echo "vs_epoll is not running"
+fi
+
+# check and setup the hugepages
+SYS_HUGEPAGE=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
+hugepageFree=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages)
+
+if [ ${SYS_HUGEPAGE} -lt 1024 ] || [ $hugepageFree -eq 0 ]; then
+ MOUNT=$(mount | grep /mnt/nstackhuge)
+ count=$(mount | grep /mnt/nstackhuge | wc -l)
+
+ while [ "${MOUNT}" != "" ] || [ "${count}" -ne 0 ]
+ do
+ sudo umount /mnt/nstackhuge
+ sleep 1
+ MOUNT=$(mount | grep /mnt/nstackhuge)
+        count=$((count - 1))
+ done
+
+ sock_count=$(lscpu | grep 'Socket(s):' | head -1 | awk '{print $2}')
+ ls -l /sys/devices/system/node/
+
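+    # Reserve 1024 2 MB hugepages on every NUMA node.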
+ while [ "${sock_count}" -ne 0 ]
+ do
+        sock_count=$((sock_count - 1))
+ echo 1024 | sudo tee /sys/devices/system/node/node"$sock_count"/hugepages/hugepages-2048kB/nr_hugepages
+ done
+
+ sudo mkdir -p /mnt/nstackhuge
+ sudo mount -t hugetlbfs -o pagesize=2M none /mnt/nstackhuge/
+ test $? -eq 0 || exit 1
+else
+ sudo mkdir -p /mnt/nstackhuge
+ sudo mount -t hugetlbfs -o pagesize=2M none /mnt/nstackhuge/
+fi
+
+sudo mkdir -p /var/run/ip_module/
diff --git a/tests/dmm/dmm_scripts/run_dmm.sh b/tests/dmm/dmm_scripts/run_dmm.sh
new file mode 100755
index 0000000000..f6fa3587e3
--- /dev/null
+++ b/tests/dmm/dmm_scripts/run_dmm.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+set -x
+
+OS_ID=$(grep '^ID=' /etc/os-release | cut -f2- -d= | sed -e 's/\"//g')
+ROOTDIR=/tmp/DMM-testing
+PWDDIR=$(pwd)
+APP_DIR=${ROOTDIR}/dmm/release/bin/
+LIB_PATH=${APP_DIR}/../lib64
+dut1_ip=$1
+dut2_ip=$2
+proc_name=$3
+# proc_name => 0 = server, 1 = client
+
+# Try to kill the vs_epoll
+sudo killall vs_epoll
+
+sudo pgrep vs_epoll
+if [ $? -eq "0" ]; then
+ success=false
+ sudo pkill vs_epoll
+ echo "RC = $?"
+ for attempt in {1..5}; do
+ echo "Checking if vs_epoll is still alive, attempt nr ${attempt}"
+ sudo pgrep vs_epoll
+ if [ $? -eq "1" ]; then
+ echo "vs_epoll is dead"
+ success=true
+ break
+ fi
+ echo "vs_epoll is still alive, waiting 1 second"
+ sleep 1
+ done
+ if [ "$success" = false ]; then
+ echo "The command sudo pkill vs_epoll failed"
+ sudo pkill -9 vs_epoll
+ echo "RC = $?"
+ exit 1
+ fi
+else
+ echo "vs_epoll is not running"
+fi
+
+sleep 2
+
+cat /proc/meminfo
+
+cd ${LIB_PATH}
+chmod 777 *
+ls -l
+
+cd ${APP_DIR}
+cp -r ${LIB_PATH}/libnStackAPI.so .
+cp -r ../configure/* .
+chmod 777 *
+
+if [ "$OS_ID" == "ubuntu" ]; then
+ ifaddress1=$(ifconfig eth1 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}')
+ echo $ifaddress1
+ ifaddress2=$(ifconfig eth2 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}')
+ echo $ifaddress2
+elif [ "$OS_ID" == "centos" ]; then
+ ifaddress1=$(ifconfig enp0s8 | grep 'inet' | cut -d: -f2 | awk '{print $2}')
+ echo $ifaddress1
+ ifaddress2=$(ifconfig enp0s9 | grep 'inet' | cut -d: -f2 | awk '{print $2}')
+ echo $ifaddress2
+fi
+
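+# Generate the nStack module configuration (kernel stack only) and the route
+# dispatch configuration used by the DMM test applications.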
+echo '{
+ "default_stack_name": "kernel",
+ "module_list": [
+ {
+ "stack_name": "kernel",
+ "function_name": "kernel_stack_register",
+ "libname": "./",
+ "loadtype": "static",
+ "deploytype": "1",
+ "maxfd": "1024",
+ "minfd": "0",
+ "priorty": "1",
+ "stackid": "0",
+ },
+ ]
+}' | tee module_config.json
+
+echo '{
+ "ip_route": [
+ {
+ "subnet": "'$ifaddress1'/24",
+ "type": "nstack-kernel",
+ },
+ {
+ "subnet": "'$ifaddress2'/24",
+ "type": "nstack-kernel",
+ },
+ ],
+ "prot_route": [
+ {
+ "proto_type": "1",
+ "type": "nstack-kernel",
+ },
+ {
+ "proto_type": "2",
+ "type": "nstack-kernel",
+ }
+ ],
+}' | tee rd_config.json
+
+ls -l
+
+# Only for the kernel stack
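+# Run the test binary with the nStack API library preloaded; role 0 runs the
+# vs_epoll server, role 1 the vc_common client.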
+if [ ${proc_name} -eq 0 ]; then
+sudo LD_PRELOAD=${LIB_PATH}/libnStackAPI.so ./vs_epoll -p 20000 -d ${dut2_ip} -a 10000 -s ${dut1_ip} -l 200 -t 50000 -i 0 -f 1 -r 20000 -n 1 -w 10 -u 10000 -e 10 -x 1
+else
+sudo LD_PRELOAD=${LIB_PATH}/libnStackAPI.so ./vc_common -p 20000 -d ${dut1_ip} -a 10000 -s ${dut2_ip} -l 200 -t 50000 -i 0 -f 1 -r 20000 -n 1 -w 10 -u 10000 -e 10 -x 1
+fi
+
+cd ${PWDDIR}
+
+ps -elf | grep vs_epoll
+
+sleep 10
diff --git a/tests/dmm/func/SingleCliSer-func.robot b/tests/dmm/func/SingleCliSer-func.robot
new file mode 100644
index 0000000000..fbe651dbfa
--- /dev/null
+++ b/tests/dmm/func/SingleCliSer-func.robot
@@ -0,0 +1,37 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Library | resources.libraries.python.NodePath
+| Library | resources.libraries.python.Trace
+| Library | resources.libraries.python.TrafficScriptExecutor
+| Library | resources.libraries.python.DMM.SingleCliSer
+| Resource | resources/libraries/robot/shared/default.robot
+| Resource | resources/libraries/robot/shared/interfaces.robot
+| Resource | resources/libraries/robot/shared/counters.robot
+| Resource | resources/libraries/robot/dmm/dmm_utils.robot
+| Force Tags | 3_NODE_SINGLE_LINK_TOPO | VM_ENV | FUNCTEST | DMM
+| Documentation | *DMM vs epoll test suite.*
+| ...
+| ... | Test suite uses 3-node topology TG - DUT1 - DUT2 - TG with single link
+| ... | between nodes. From this topology only DUT1 and DUT2 nodes are used.
+| ... | Here we test the vs_epoll and vc_epoll applications.
+
+*** Test Cases ***
+| TC01: DMM base vs epoll test case
+| | Given Path for 2-node testing is set | ${nodes['DUT1']} | ${nodes['DUT2']}
+| | And Pick out the port used to execute test
+| | When Exec the base vs epoll test | ${dut1_node} | ${dut2_node}
+| | Echo DMM logs | ${dut2_node}
+| | ${no_packet_loss} = | Get the test result | ${dut2_node}
+| | Then Should Not Be Equal As Integers | ${no_packet_loss} | 0
\ No newline at end of file
diff --git a/tests/dmm/func/__init__.robot b/tests/dmm/func/__init__.robot
new file mode 100644
index 0000000000..3215dc87f9
--- /dev/null
+++ b/tests/dmm/func/__init__.robot
@@ -0,0 +1,18 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/shared/default.robot
+| Resource | resources/libraries/robot/shared/interfaces.robot
+| Library | resources.libraries.python.DMM.SetupDMMTest
+| Suite Setup | Setup DMM Test | ${nodes}