summaryrefslogtreecommitdiffstats
path: root/scripts/automation/regression/stateless_tests
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/automation/regression/stateless_tests')
-rwxr-xr-xscripts/automation/regression/stateless_tests/__init__.py0
-rwxr-xr-xscripts/automation/regression/stateless_tests/stl_benchmark_test.py75
-rw-r--r--scripts/automation/regression/stateless_tests/stl_client_test.py350
-rwxr-xr-xscripts/automation/regression/stateless_tests/stl_examples_test.py31
-rw-r--r--scripts/automation/regression/stateless_tests/stl_general_test.py113
-rw-r--r--scripts/automation/regression/stateless_tests/stl_performance_test.py351
-rw-r--r--scripts/automation/regression/stateless_tests/stl_rx_test.py568
-rwxr-xr-xscripts/automation/regression/stateless_tests/trex_client_pkg_test.py39
8 files changed, 1527 insertions, 0 deletions
diff --git a/scripts/automation/regression/stateless_tests/__init__.py b/scripts/automation/regression/stateless_tests/__init__.py
new file mode 100755
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/__init__.py
diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
new file mode 100755
index 00000000..6940efd3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py
@@ -0,0 +1,75 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+from collections import deque
+from time import time, sleep
+
+class STLBenchmark_Test(CStlGeneral_Test):
+    """Benchmark stateless performance"""
+
+ def test_CPU_benchmark(self):
+ critical_test = CTRexScenario.setup_name in ('kiwi02', 'trex08', 'trex09') # temporary patch, this test needs to be fixed
+ timeout = 60 # max time to wait for stabilization
+ stabilize = 5 # ensure stabilization over this period
+ print('')
+
+ for profile_bench in self.get_benchmark_param('profiles'):
+ cpu_utils = deque([0] * stabilize, maxlen = stabilize)
+ bws_per_core = deque([0] * stabilize, maxlen = stabilize)
+ kwargs = profile_bench.get('kwargs', {})
+ print('Testing profile %s, kwargs: %s' % (profile_bench['name'], kwargs))
+ profile = STLProfile.load(os.path.join(CTRexScenario.scripts_path, profile_bench['name']), **kwargs)
+
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ self.stl_trex.add_streams(profile)
+ mult = '1%' if self.is_virt_nics else '10%'
+ self.stl_trex.start(mult = mult)
+ start_time = time()
+
+ for i in range(timeout + 1):
+ stats = self.stl_trex.get_stats()
+ cpu_utils.append(stats['global']['cpu_util'])
+ bws_per_core.append(stats['global']['bw_per_core'])
+ if i > stabilize and min(cpu_utils) > max(cpu_utils) * 0.95:
+ break
+ sleep(0.5)
+
+ agv_cpu_util = sum(cpu_utils) / stabilize
+ agv_bw_per_core = sum(bws_per_core) / stabilize
+
+ if critical_test and i == timeout and agv_cpu_util > 10:
+ raise Exception('Timeout on waiting for stabilization, last CPU util values: %s' % list(cpu_utils))
+ if stats[0]['opackets'] < 300 or stats[1]['opackets'] < 300:
+ raise Exception('Too few opackets, port0: %s, port1: %s' % (stats[0]['opackets'], stats[1]['opackets']))
+ if stats['global']['queue_full'] > 100000:
+ raise Exception('Too much queue_full: %s' % stats['global']['queue_full'])
+ if not cpu_utils[-1]:
+ raise Exception('CPU util is zero, last values: %s' % list(cpu_utils))
+ print('Done (%ss), CPU util: %4g, bw_per_core: %6sGb/core' % (int(time() - start_time), agv_cpu_util, round(agv_bw_per_core, 2)))
+ # TODO: add check of benchmark based on results from regression
+
+ # report benchmarks
+ if self.GAManager:
+ try:
+ pass
+ #profile_repr = '%s.%s %s' % (CTRexScenario.setup_name,
+ # os.path.basename(profile_bench['name']),
+ # repr(kwargs).replace("'", ''))
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core', value = int(agv_bw_per_core))
+ # TODO: report expected once acquired
+ #self.GAManager.gaAddAction(Event = 'stateless_test', action = profile_repr,
+ # label = 'bw_per_core_exp', value = int(expected_norm_cpu))
+ #self.GAManager.emptyAndReportQ()
+ except Exception as e:
+ print('Sending GA failed: %s' % e)
+
+ def tearDown(self):
+ self.stl_trex.reset()
+ self.stl_trex.clear_stats()
+ sleep(1)
+ CStlGeneral_Test.tearDown(self)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py
new file mode 100644
index 00000000..36ac0ee1
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_client_test.py
@@ -0,0 +1,350 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+import glob
+
+
+def get_error_in_percentage (golden, value):
+ return abs(golden - value) / float(golden)
+
+def get_stl_profiles ():
+ profiles_path = os.path.join(CTRexScenario.scripts_path, 'stl/')
+ py_profiles = glob.glob(profiles_path + "/*.py")
+ yaml_profiles = glob.glob(profiles_path + "yaml/*.yaml")
+ return py_profiles + yaml_profiles
+
+
+class STLClient_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+ CStlGeneral_Test.setUp(self)
+
+ if self.is_virt_nics:
+ self.percentage = 5
+ self.pps = 500
+ else:
+ self.percentage = 50
+ self.pps = 50000
+
+ # strict mode is only for 'wire only' connection
+ self.strict = True if self.is_loopback and not self.is_virt_nics else False
+
+ assert 'bi' in CTRexScenario.stl_ports_map
+
+ self.c = CTRexScenario.stl_trex
+
+ self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
+
+ self.c.connect()
+ self.c.reset(ports = [self.tx_port, self.rx_port])
+
+ self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/IP()/'a_payload_example')
+ self.profiles = get_stl_profiles()
+
+
+ @classmethod
+ def tearDownClass(cls):
+ if CTRexScenario.stl_init_error:
+ return
+ # connect back at end of tests
+ if not cls.is_connected():
+ CTRexScenario.stl_trex.connect()
+
+
+ def verify (self, expected, got):
+ if self.strict:
+ assert expected == got
+ else:
+ assert get_error_in_percentage(expected, got) < 0.05
+
+
+ def test_basic_connect_disconnect (self):
+ try:
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_basic_single_burst (self):
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXSingleBurst(total_pkts = 100,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(100, stats[self.tx_port]['opackets'])
+ self.verify(100, stats[self.rx_port]['ipackets'])
+
+ self.verify(100, stats[self.rx_port]['opackets'])
+ self.verify(100, stats[self.tx_port]['ipackets'])
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_multi_burst (self):
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXMultiBurst(pkts_per_burst = 10,
+ count = 20,
+ percentage = self.percentage)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+ self.verify(200, stats[self.tx_port]['opackets'])
+ self.verify(200, stats[self.rx_port]['ipackets'])
+
+ self.verify(200, stats[self.rx_port]['opackets'])
+ self.verify(200, stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ #
+ def test_basic_cont (self):
+ pps = self.pps
+ duration = 0.1
+ golden = pps * duration
+
+ try:
+ b1 = STLStream(name = 'burst',
+ packet = self.pkt,
+ mode = STLTXCont(pps = pps)
+ )
+
+ for i in range(0, 5):
+ self.c.add_streams([b1], ports = [self.tx_port, self.rx_port])
+
+ self.c.clear_stats()
+ self.c.start(ports = [self.tx_port, self.rx_port], duration = duration)
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.wait_on_traffic(ports = [self.tx_port, self.rx_port])
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats
+ assert self.rx_port in stats
+
+                # cont. with duration should be quite precise - 5% error is relaxed enough
+
+ assert get_error_in_percentage(stats[self.tx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.rx_port]['ipackets'], golden) < 0.05
+
+ assert get_error_in_percentage(stats[self.rx_port]['opackets'], golden) < 0.05
+ assert get_error_in_percentage(stats[self.tx_port]['ipackets'], golden) < 0.05
+
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_stress_connect_disconnect (self):
+ try:
+ for i in range(0, 100):
+ self.c.connect()
+ assert self.c.is_connected(), 'client should be connected'
+ self.c.disconnect()
+ assert not self.c.is_connected(), 'client should be disconnected'
+
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+
+ def test_stress_tx (self):
+ try:
+ s1 = STLStream(name = 'stress',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage))
+
+ # add both streams to ports
+ self.c.add_streams([s1], ports = [self.tx_port, self.rx_port])
+ for i in range(0, 100):
+
+ self.c.start(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_paused(), 'port should be paused'
+ assert self.c.ports[self.rx_port].is_paused(), 'port should be paused'
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+
+ assert self.c.ports[self.tx_port].is_transmitting(), 'port should be active'
+ assert self.c.ports[self.rx_port].is_transmitting(), 'port should be active'
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ assert not self.c.ports[self.tx_port].is_active(), 'port should be idle'
+ assert not self.c.ports[self.rx_port].is_active(), 'port should be idle'
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_all_profiles (self):
+ if self.is_virt_nics or not self.is_loopback:
+ self.skip('skipping profile tests for virtual / non loopback')
+ return
+
+ try:
+
+ for profile in self.profiles:
+
+ print("now testing profile {0}...\n".format(profile))
+
+ p1 = STLProfile.load(profile, port_id = self.tx_port)
+ p2 = STLProfile.load(profile, port_id = self.rx_port)
+
+ # if profile contains custom MAC addrs we need promiscuous mode
+ # but virtual NICs does not support promiscuous mode
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+ if p1.has_custom_mac_addr():
+ if not self.is_virt_nics:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True)
+ else:
+ print("\n*** profile needs promiscuous mode but running on virtual NICs - skipping... ***\n")
+ continue
+
+ if p1.has_flow_stats():
+ print("\n*** profile needs RX caps - skipping... ***\n")
+ continue
+
+ self.c.add_streams(p1, ports = self.tx_port)
+ self.c.add_streams(p2, ports = self.rx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = [self.tx_port, self.rx_port], mult = "30%")
+ time.sleep(100 / 1000.0)
+
+ if p1.is_pauseable() and p2.is_pauseable():
+ self.c.pause(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.resume(ports = [self.tx_port, self.rx_port])
+ time.sleep(100 / 1000.0)
+
+ self.c.stop(ports = [self.tx_port, self.rx_port])
+
+ stats = self.c.get_stats()
+
+ assert self.tx_port in stats, '{0} - no stats for TX port'.format(profile)
+ assert self.rx_port in stats, '{0} - no stats for RX port'.format(profile)
+
+ self.verify(stats[self.tx_port]['opackets'], stats[self.rx_port]['ipackets'])
+ self.verify(stats[self.rx_port]['opackets'], stats[self.tx_port]['ipackets'])
+
+ self.c.remove_all_streams(ports = [self.tx_port, self.rx_port])
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ finally:
+ self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False)
+
+
+ # see https://trex-tgn.cisco.com/youtrack/issue/trex-226
+ def test_latency_pause_resume (self):
+
+ try:
+
+ s1 = STLStream(name = 'latency',
+ packet = self.pkt,
+ mode = STLTXCont(percentage = self.percentage),
+ flow_stats = STLFlowLatencyStats(pg_id = 1))
+
+ self.c.add_streams([s1], ports = self.tx_port)
+
+ self.c.clear_stats()
+
+ self.c.start(ports = self.tx_port)
+
+ for i in range(100):
+ self.c.pause()
+ self.c.resume()
+
+ self.c.stop()
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
+
+ def test_pcap_remote (self):
+ try:
+ pcap_file = os.path.join(CTRexScenario.scripts_path, 'automation/regression/test_pcaps/pcap_dual_test.erf')
+
+ master = self.tx_port
+ slave = master ^ 0x1
+
+ self.c.reset(ports = [master, slave])
+ self.c.clear_stats()
+ self.c.push_remote(pcap_file,
+ ports = [master],
+ ipg_usec = 100,
+ is_dual = True)
+ self.c.wait_on_traffic(ports = [master])
+
+ stats = self.c.get_stats()
+
+ self.verify(stats[master]['opackets'], 52)
+ self.verify(stats[slave]['opackets'], 48)
+
+ except STLError as e:
+ assert False , '{0}'.format(e)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
new file mode 100755
index 00000000..71fc3287
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_examples_test.py
@@ -0,0 +1,31 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+import os, sys
+from misc_methods import run_command
+
+
+class STLExamples_Test(CStlGeneral_Test):
+ """This class defines the IMIX testcase of the TRex traffic generator"""
+
+ def explicitSetUp(self):
+ # examples connect by their own
+ if self.is_connected():
+ CTRexScenario.stl_trex.disconnect()
+
+ def explicitTearDown(self):
+ # connect back at end of tests
+ if not self.is_connected():
+ self.stl_trex.connect()
+
+ def test_stl_examples(self):
+ examples_dir = '../trex_control_plane/stl/examples'
+ examples_to_test = [
+ 'stl_imix.py',
+ ]
+
+ for example in examples_to_test:
+ self.explicitSetUp()
+ return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name']))
+ self.explicitTearDown()
+ assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (return_code, stdout, stderr)
+
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
new file mode 100644
index 00000000..590733ba
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_general_test.py
@@ -0,0 +1,113 @@
+import os, sys
+import unittest
+from trex import CTRexScenario
+from stateful_tests.trex_general_test import CTRexGeneral_Test
+from trex_stl_lib.api import *
+import time
+from nose.tools import nottest
+
+class CStlGeneral_Test(CTRexGeneral_Test):
+ """This class defines the general stateless testcase of the TRex traffic generator"""
+
+ def setUp(self):
+ self.stl_trex = CTRexScenario.stl_trex if CTRexScenario.stl_trex else 'mock'
+ CTRexGeneral_Test.setUp(self)
+ # check basic requirements, should be verified at test_connectivity, here only skip test
+ if CTRexScenario.stl_init_error:
+ self.skip(CTRexScenario.stl_init_error)
+
+ def connect(self, timeout = 100):
+ # need delay and check only because TRex process might be still starting
+ sys.stdout.write('Connecting')
+ for i in range(timeout):
+ try:
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ self.stl_trex.connect()
+ print('')
+ return True
+ except:
+ time.sleep(0.1)
+ print('')
+ return False
+
+ def map_ports(self, timeout = 100):
+ sys.stdout.write('Mapping ports')
+ for i in range(timeout):
+ sys.stdout.write('.')
+ sys.stdout.flush()
+ CTRexScenario.stl_ports_map = stl_map_ports(self.stl_trex)
+ if self.verify_bidirectional(CTRexScenario.stl_ports_map):
+ print('')
+ return True
+ time.sleep(0.1)
+ print('')
+ return False
+
+ # verify all the ports are bidirectional
+ @staticmethod
+ def verify_bidirectional(mapping_dict):
+ if len(mapping_dict['unknown']):
+ return False
+ if len(mapping_dict['bi']) * 2 == len(mapping_dict['map']):
+ return True
+ return False
+
+ @staticmethod
+ def get_port_count():
+ return CTRexScenario.stl_trex.get_port_count()
+
+ @staticmethod
+ def is_connected():
+ return CTRexScenario.stl_trex.is_connected()
+
+class STLBasic_Test(CStlGeneral_Test):
+ # will run it first explicitly, check connectivity and configure routing
+ @nottest
+ def test_connectivity(self):
+ if not self.is_loopback:
+ try:
+ sys.stdout.flush()
+ sys.stdout.write('Configuring DUT... ')
+ start_time = time.time()
+ if CTRexScenario.router_cfg['forceCleanConfig']:
+ CTRexScenario.router.load_clean_config()
+ CTRexScenario.router.configure_basic_interfaces()
+ CTRexScenario.router.config_pbr(mode = "config")
+ CTRexScenario.router.config_ipv6_pbr(mode = "config")
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not configure device, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ try:
+ sys.stdout.write('Starting TRex... ')
+ start_time = time.time()
+ cores = self.configuration.trex.get('trex_cores', 1)
+ if self.is_virt_nics and cores > 1:
+ raise Exception('Number of cores should be 1 with virtual NICs')
+ if not CTRexScenario.no_daemon:
+ self.trex.start_stateless(c = cores)
+ self.stl_trex = STLClient(username = 'TRexRegression',
+ server = self.configuration.trex['trex_name'],
+ verbose_level = CTRexScenario.json_verbose)
+ CTRexScenario.stl_trex = self.stl_trex
+ sys.stdout.write('done. (%ss)\n' % int(time.time() - start_time))
+ except Exception as e:
+ print('')
+ CTRexScenario.stl_init_error = 'Could not start stateless TRex, err: %s' % e
+ self.fail(CTRexScenario.stl_init_error)
+ return
+
+ if not self.connect():
+ CTRexScenario.stl_init_error = 'Client could not connect'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Connected')
+ if not self.map_ports():
+ CTRexScenario.stl_init_error = 'Client could not map ports'
+ self.fail(CTRexScenario.stl_init_error)
+ return
+ print('Got ports mapping: %s' % CTRexScenario.stl_ports_map)
diff --git a/scripts/automation/regression/stateless_tests/stl_performance_test.py b/scripts/automation/regression/stateless_tests/stl_performance_test.py
new file mode 100644
index 00000000..a556daf3
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_performance_test.py
@@ -0,0 +1,351 @@
+import os
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+
+def avg (values):
+ return (sum(values) / float(len(values)))
+
+# performance report object
+class PerformanceReport(object):
+ GOLDEN_NORMAL = 1
+ GOLDEN_FAIL = 2
+ GOLDEN_BETTER = 3
+
+ def __init__ (self,
+ scenario,
+ machine_name,
+ core_count,
+ avg_cpu,
+ avg_gbps,
+ avg_mpps,
+ avg_gbps_per_core,
+ avg_mpps_per_core,
+ ):
+
+ self.scenario = scenario
+ self.machine_name = machine_name
+ self.core_count = core_count
+ self.avg_cpu = avg_cpu
+ self.avg_gbps = avg_gbps
+ self.avg_mpps = avg_mpps
+ self.avg_gbps_per_core = avg_gbps_per_core
+ self.avg_mpps_per_core = avg_mpps_per_core
+
+ def show (self):
+
+ print("\n")
+ print("scenario: {0}".format(self.scenario))
+ print("machine name: {0}".format(self.machine_name))
+ print("DP core count: {0}".format(self.core_count))
+ print("average CPU: {0}".format(self.avg_cpu))
+ print("average Gbps: {0}".format(self.avg_gbps))
+ print("average Mpps: {0}".format(self.avg_mpps))
+ print("average pkt size (bytes): {0}".format( (self.avg_gbps * 1000 / 8) / self.avg_mpps))
+ print("average Gbps per core (at 100% CPU): {0}".format(self.avg_gbps_per_core))
+ print("average Mpps per core (at 100% CPU): {0}".format(self.avg_mpps_per_core))
+
+
+ def check_golden (self, golden_mpps):
+ if self.avg_mpps_per_core < golden_mpps['min']:
+ return self.GOLDEN_FAIL
+
+ if self.avg_mpps_per_core > golden_mpps['max']:
+ return self.GOLDEN_BETTER
+
+ return self.GOLDEN_NORMAL
+
+ def report_to_analytics(self, ga, golden_mpps):
+ print("\n* Reporting to GA *\n")
+ ga.gaAddTestQuery(TestName = self.scenario,
+ TRexMode = 'stl',
+ SetupName = self.machine_name,
+ TestType = 'performance',
+ Mppspc = self.avg_mpps_per_core,
+ ActionNumber = os.getenv("BUILD_ID","n/a"),
+ GoldenMin = golden_mpps['min'],
+ GoldenMax = golden_mpps['max'])
+
+ ga.emptyAndReportQ()
+
+
+class STLPerformance_Test(CStlGeneral_Test):
+ """Tests for stateless client"""
+
+ def setUp(self):
+
+ CStlGeneral_Test.setUp(self)
+
+ self.c = CTRexScenario.stl_trex
+ self.c.connect()
+ self.c.reset()
+
+
+
+ def tearDown (self):
+ CStlGeneral_Test.tearDown(self)
+
+
+ def build_perf_profile_vm (self, pkt_size, cache_size = None):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+ src_ip = '16.0.0.1'
+ dst_ip = '48.0.0.1'
+
+ base_pkt = Ether()/IP(src=src_ip,dst=dst_ip)/UDP(dport=12,sport=1025)
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1", max_value="10.0.0.255", size=4, step=1,op="inc"),
+ STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ),
+ STLVmFixIpv4(offset = "IP")
+ ],
+ cache_size = cache_size
+ );
+
+ pkt = STLPktBuilder(pkt = base_pkt/pad, vm = vm)
+ return STLStream(packet = pkt, mode = STLTXCont())
+
+
+ def build_perf_profile_syn_attack (self, pkt_size):
+ size = pkt_size - 4; # HW will add 4 bytes ethernet FCS
+
+ # TCP SYN
+ base_pkt = Ether()/IP(dst="48.0.0.1")/TCP(dport=80,flags="S")
+ pad = max(0, size - len(base_pkt)) * 'x'
+
+ # vm
+ vm = STLScVmRaw( [ STLVmFlowVar(name="ip_src",
+ min_value="16.0.0.0",
+ max_value="18.0.0.254",
+ size=4, op="random"),
+
+ STLVmFlowVar(name="src_port",
+ min_value=1025,
+ max_value=65000,
+ size=2, op="random"),
+
+ STLVmWrFlowVar(fv_name="ip_src", pkt_offset= "IP.src" ),
+
+ STLVmFixIpv4(offset = "IP"), # fix checksum
+
+ STLVmWrFlowVar(fv_name="src_port",
+                                          pkt_offset= "TCP.sport") # write randomized TCP source port
+
+ ]
+ )
+
+ pkt = STLPktBuilder(pkt = base_pkt,
+ vm = vm)
+
+ return STLStream(packet = pkt,
+                         random_seed = 0x1234,# can be removed; fixed seed gives the same random values on every run
+ mode = STLTXCont())
+
+
+
+ # single CPU, VM, no cache, 64 bytes
+ def test_performance_vm_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, VM, cached, 64 bytes
+ def test_performance_vm_single_cpu_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, single CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # single CPU, syn attack, 64 bytes
+ def test_performance_syn_attack_single_cpu (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, single CPU"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+ scenario_cfg['core_count'] = 1
+
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # two CPUs, VM, no cache, 64 bytes
+ def test_performance_vm_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+ # multi CPUs, VM, cached, 64 bytes
+ def test_performance_vm_multi_cpus_cached (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "VM - 64 bytes, multi CPU, cache size 1024"
+ scenario_cfg['streams'] = self.build_perf_profile_vm(64, cache_size = 1024)
+
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+ # multi CPUs, syn attack, 64 bytes
+ def test_performance_syn_attack_multi_cpus (self):
+ setup_cfg = self.get_benchmark_param('cfg')
+ scenario_cfg = {}
+
+ scenario_cfg['name'] = "syn attack - 64 bytes, multi CPUs"
+ scenario_cfg['streams'] = self.build_perf_profile_syn_attack(64)
+
+ scenario_cfg['core_count'] = setup_cfg['core_count']
+ scenario_cfg['mult'] = setup_cfg['mult']
+ scenario_cfg['mpps_per_core_golden'] = setup_cfg['mpps_per_core_golden']
+
+ self.execute_single_scenario(scenario_cfg)
+
+
+
+############################################# test's infra functions ###########################################
+
+ def execute_single_scenario (self, scenario_cfg, iterations = 4):
+ golden = scenario_cfg['mpps_per_core_golden']
+
+
+ for i in range(iterations, -1, -1):
+ report = self.execute_single_scenario_iteration(scenario_cfg)
+ rc = report.check_golden(golden)
+
+ if (rc == PerformanceReport.GOLDEN_NORMAL) or (rc == PerformanceReport.GOLDEN_BETTER):
+ if self.GAManager:
+ report.report_to_analytics(self.GAManager, golden)
+
+ return
+
+ if rc == PerformanceReport.GOLDEN_BETTER:
+ return
+
+ print("\n*** Measured Mpps per core '{0}' is lower than expected golden '{1} - re-running scenario...{2} attempts left".format(report.avg_mpps_per_core, scenario_cfg['mpps_per_core_golden'], i))
+
+ assert 0, "performance failure"
+
+
+
+
+ def execute_single_scenario_iteration (self, scenario_cfg):
+
+ print("\nExecuting performance scenario: '{0}'\n".format(scenario_cfg['name']))
+
+ self.c.reset(ports = [0])
+ self.c.add_streams(ports = [0], streams = scenario_cfg['streams'])
+
+ # use one core
+ cores_per_port = self.c.system_info.get('dp_core_count_per_port', 0)
+ if cores_per_port < scenario_cfg['core_count']:
+ assert 0, "test configuration requires {0} cores but only {1} per port are available".format(scenario_cfg['core_count'], cores_per_port)
+
+ core_mask = (2 ** scenario_cfg['core_count']) - 1
+ self.c.start(ports = [0], mult = scenario_cfg['mult'], core_mask = [core_mask])
+
+        # stabilize
+ print("Step 1 - waiting for stabilization... (10 seconds)")
+ for _ in range(10):
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ print("\n")
+
+ samples = {'cpu' : [], 'bps': [], 'pps': []}
+
+ # let the server gather samples
+ print("Step 2 - Waiting for samples... (60 seconds)")
+
+ for i in range(0, 3):
+
+ # sample bps/pps
+ for _ in range(0, 20):
+ stats = self.c.get_stats(ports = 0)
+ samples['bps'].append(stats[0]['tx_bps'])
+ samples['pps'].append(stats[0]['tx_pps'])
+ time.sleep(1)
+ sys.stdout.write('.')
+ sys.stdout.flush()
+
+ # sample CPU per core
+ rc = self.c._transmit('get_utilization')
+ if not rc:
+ raise Exception(rc)
+
+ data = rc.data()['cpu']
+ # filter
+ data = [s for s in data if s['ports'][0] == 0]
+
+ assert len(data) == scenario_cfg['core_count'] , "sampling info does not match core count"
+
+ for s in data:
+ samples['cpu'] += s['history']
+
+
+ stats = self.c.get_stats(ports = 0)
+ self.c.stop(ports = [0])
+
+
+
+ avg_values = {k:avg(v) for k, v in samples.items()}
+ avg_cpu = avg_values['cpu'] * scenario_cfg['core_count']
+ avg_gbps = avg_values['bps'] / 1e9
+ avg_mpps = avg_values['pps'] / 1e6
+
+ avg_gbps_per_core = avg_gbps * (100.0 / avg_cpu)
+ avg_mpps_per_core = avg_mpps * (100.0 / avg_cpu)
+
+ report = PerformanceReport(scenario = scenario_cfg['name'],
+ machine_name = CTRexScenario.setup_name,
+ core_count = scenario_cfg['core_count'],
+ avg_cpu = avg_cpu,
+ avg_gbps = avg_gbps,
+ avg_mpps = avg_mpps,
+ avg_gbps_per_core = avg_gbps_per_core,
+ avg_mpps_per_core = avg_mpps_per_core)
+
+
+ report.show()
+
+ print("")
+ golden = scenario_cfg['mpps_per_core_golden']
+ print("golden Mpps per core (at 100% CPU): min: {0}, max {1}".format(golden['min'], golden['max']))
+
+
+ return report
+
diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py
new file mode 100644
index 00000000..524ad4bf
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py
@@ -0,0 +1,568 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from trex_stl_lib.api import *
+import os, sys
+
+ERROR_LATENCY_TOO_HIGH = 1
+
class STLRX_Test(CStlGeneral_Test):
    """Tests for the RX feature (per-stream flow stats and latency measurement).

    Each test injects streams on ``self.tx_port`` and verifies the TX/RX
    counters and latency error counters reported by the server for the
    bidirectionally-mapped peer port ``self.rx_port``.
    """

    def setUp(self):
        """Prepare the client, port pair and reference packets for one test.

        Rates, packet counts and jumbo-latency thresholds are tuned per NIC
        driver: virtual/weak NICs get tiny bursts, while ixgbe/i40e run at
        high rate and additionally enable the 9k-byte latency checks.
        Skips the test if the RX port lacks flow_stats/latency capability.
        """
        # Per-driver tuning table. 'rate_latency' falls back to 'rate_percent'
        # when absent; 'allow_packets_drop_num' defaults to 0 (no drops allowed).
        per_driver_params = {
            'rte_vmxnet3_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_ixgbe_pmd': {
                'rate_percent': 30,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 300,
                'latency_9k_max_latency': 400,
            },
            'rte_i40e_pmd': {
                'rate_percent': 80,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 100,
                'latency_9k_max_latency': 250,
            },
            'rte_igb_pmd': {
                'rate_percent': 80,
                'total_pkts': 500,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_em_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'rte_virtio_pmd': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'allow_packets_drop_num': 1, # allow 1 pkt drop
            },
        }

        CStlGeneral_Test.setUp(self)
        # These tests need a bidirectional port mapping (TX port paired with an RX peer).
        assert 'bi' in CTRexScenario.stl_ports_map

        self.c = CTRexScenario.stl_trex

        self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]

        port_info = self.c.get_port_info(ports = self.rx_port)[0]
        self.speed = port_info['speed']


        cap = port_info['rx']['caps']
        if "flow_stats" not in cap or "latency" not in cap:
            self.skip('port {0} does not support RX'.format(self.rx_port))
        self.cap = cap

        drv_name = port_info['driver']
        # NOTE(review): presumably ixgbe cannot parse the IPv6 flow-stat
        # streams used below, hence IPv6 tests are disabled for it — confirm.
        if drv_name == 'rte_ixgbe_pmd':
            self.ipv6_support = False
        else:
            self.ipv6_support = True
        self.rate_percent = per_driver_params[drv_name]['rate_percent']
        self.total_pkts = per_driver_params[drv_name]['total_pkts']
        self.rate_lat = per_driver_params[drv_name].get('rate_latency', self.rate_percent)
        self.latency_9k_enable = per_driver_params[drv_name]['latency_9k_enable']
        self.latency_9k_max_average = per_driver_params[drv_name].get('latency_9k_max_average')
        self.latency_9k_max_latency = per_driver_params[drv_name].get('latency_9k_max_latency')
        self.allow_drop = per_driver_params[drv_name].get('allow_packets_drop_num', 0)

        # Fixed PPS for latency-enabled single-burst streams.
        self.lat_pps = 1000
        # Tests flip this to True when they intentionally oversubscribe the line.
        self.drops_expected = False
        self.c.reset(ports = [self.tx_port, self.rx_port])

        # Field engine: increment IP.src over a /24-ish range and fix the checksum.
        vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1",
                                          max_value="10.0.0.255", size=4, step=1,op="inc"),
                           STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
                           STLVmFixIpv4(offset = "IP") # fix checksum
                           ]
                         # Latency is bound to one core. We test that this option is not causing trouble
                         ,split_by_field = "ip_src"
                         ,cache_size =255 # Cache is ignored by latency flows. Need to test it is not crashing.
                         );

        # Reference packets used across the tests (small/IPv6/large/9k, with and without the field engine).
        self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.ipv6pkt = STLPktBuilder(pkt = Ether()/IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815",src="2001:4860:0:2001::68")
                                     /UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000))
        self.pkt_9k = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000))
        self.vm_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")
                                    / UDP(dport=12,sport=1025)/('Your_paylaod_comes_here')
                                    , vm = vm)
        self.vm_large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000)
                                          , vm = vm)
        self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000)
                                       ,vm = vm)


    @classmethod
    def tearDownClass(cls):
        """Reconnect the shared client so later test classes get a live session."""
        if CTRexScenario.stl_init_error:
            return
        # connect back at end of tests
        if not cls.is_connected():
            CTRexScenario.stl_trex.connect()


    def __verify_latency (self, latency_stats,max_latency,max_average):
        """Check one latency-stats record against hard limits.

        Asserts (test failure) on any non-zero error counter; returns
        ERROR_LATENCY_TOO_HIGH if average or max latency exceeds the given
        thresholds, 0 on success.
        """
        error=0;
        err_latency = latency_stats['err_cntrs']
        latency = latency_stats['latency']

        # Any error counter (drops, out-of-order, dup, seq errors) is a hard failure.
        for key in err_latency :
            error +=err_latency[key]
        if error !=0 :
            pprint.pprint(err_latency)
            tmp = 'RX pkts ERROR - one of the error is on'
            print(tmp)
            assert False, tmp

        if latency['average']> max_average:
            pprint.pprint(latency_stats)
            tmp = 'Average latency is too high {0} {1} '.format(latency['average'], max_average)
            print(tmp)
            return ERROR_LATENCY_TOO_HIGH

        if latency['total_max']> max_latency:
            pprint.pprint(latency_stats)
            tmp = 'Max latency is too high {0} {1} '.format(latency['total_max'], max_latency)
            print(tmp)
            return ERROR_LATENCY_TOO_HIGH

        return 0



    def __verify_flow (self, pg_id, total_pkts, pkt_len, stats):
        """Verify TX/RX counters (and latency error counters, if present) for one pg_id.

        Drops up to ``self.allow_drop`` packets are tolerated; all checks that
        depend on reception are waived when ``self.drops_expected`` is set.
        Asserts (test failure) on any mismatch.
        """
        flow_stats = stats['flow_stats'].get(pg_id)
        latency_stats = stats['latency'].get(pg_id)

        if not flow_stats:
            assert False, "no flow stats available"

        tx_pkts = flow_stats['tx_pkts'].get(self.tx_port, 0)
        tx_bytes = flow_stats['tx_bytes'].get(self.tx_port, 0)
        rx_pkts = flow_stats['rx_pkts'].get(self.rx_port, 0)
        if latency_stats is not None:
            drops = latency_stats['err_cntrs']['dropped']
            ooo = latency_stats['err_cntrs']['out_of_order']
            dup = latency_stats['err_cntrs']['dup']
            sth = latency_stats['err_cntrs']['seq_too_high']
            stl = latency_stats['err_cntrs']['seq_too_low']
            lat = latency_stats['latency']
            # Reordering/duplication is never acceptable, even when drops are expected.
            if ooo != 0 or dup != 0 or stl != 0:
                pprint.pprint(latency_stats)
                tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                assert False, tmp

            if (drops > self.allow_drop or sth != 0) and not self.drops_expected:
                pprint.pprint(latency_stats)
                tmp='Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                assert False, tmp

        if tx_pkts != total_pkts:
            pprint.pprint(flow_stats)
            tmp = 'TX pkts mismatch - got: {0}, expected: {1}'.format(tx_pkts, total_pkts)
            assert False, tmp

        if tx_bytes != (total_pkts * pkt_len):
            pprint.pprint(flow_stats)
            tmp = 'TX bytes mismatch - got: {0}, expected: {1}'.format(tx_bytes, (total_pkts * pkt_len))
            assert False, tmp

        if abs(total_pkts - rx_pkts) > self.allow_drop and not self.drops_expected:
            pprint.pprint(flow_stats)
            tmp = 'RX pkts mismatch - got: {0}, expected: {1}'.format(rx_pkts, total_pkts)
            assert False, tmp

        # RX byte counters exist only on NICs that report them (capability flag).
        if "rx_bytes" in self.cap:
            rx_bytes = flow_stats['rx_bytes'].get(self.rx_port, 0)
            if abs(rx_bytes / pkt_len - total_pkts ) > self.allow_drop and not self.drops_expected:
                pprint.pprint(flow_stats)
                tmp = 'RX bytes mismatch - got: {0}, expected: {1}'.format(rx_bytes, (total_pkts * pkt_len))
                assert False, tmp


    # RX iteration: run the configured streams once and verify every expected flow
    def __rx_iteration (self, exp_list):
        """Clear stats, run traffic on tx_port to completion, then verify each
        expectation dict ({'pg_id', 'total_pkts', 'pkt_len'}) in exp_list."""
        self.c.clear_stats()

        self.c.start(ports = [self.tx_port])
        self.c.wait_on_traffic(ports = [self.tx_port])
        stats = self.c.get_stats()

        for exp in exp_list:
            self.__verify_flow(exp['pg_id'], exp['total_pkts'], exp['pkt_len'], stats)


    # one stream on TX --> RX
    def test_one_stream(self):
        """Single latency-enabled burst; verify its flow/latency counters."""
        total_pkts = self.total_pkts

        try:
            s1 = STLStream(name = 'rx',
                           packet = self.pkt,
                           flow_stats = STLFlowLatencyStats(pg_id = 5),
                           mode = STLTXSingleBurst(total_pkts = total_pkts,
                                                   percentage = self.rate_lat
                                                   ))

            # add both streams to ports
            self.c.add_streams([s1], ports = [self.tx_port])

            print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))

            exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}

            self.__rx_iteration( [exp] )


        except STLError as e:
            assert False , '{0}'.format(e)


    def test_multiple_streams(self):
        """Max out the pg_id space: ~128 latency + ~127 flow-stat streams at once."""
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        num_latency_streams = 128
        num_flow_stat_streams = 127
        # Split the packet budget and rate evenly across all streams.
        total_pkts = int(self.total_pkts / (num_latency_streams + num_flow_stat_streams))
        if total_pkts == 0:
            total_pkts = 1
        percent = float(self.rate_lat) / (num_latency_streams + num_flow_stat_streams)

        try:
            streams = []
            exp = []
            # NOTE(review): comment said "10 identical streams" but the loop
            # creates num_latency_streams-1 latency streams (pg_id 1..127),
            # each bursting total_pkts+pg_id packets — confirm intent.
            for pg_id in range(1, num_latency_streams):

                streams.append(STLStream(name = 'rx {0}'.format(pg_id),
                                         packet = self.pkt,
                                         flow_stats = STLFlowLatencyStats(pg_id = pg_id),
                                         mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))

                exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})

            # Plain flow-stat (no latency) streams on the next pg_id range.
            for pg_id in range(num_latency_streams + 1, num_latency_streams + num_flow_stat_streams):

                streams.append(STLStream(name = 'rx {0}'.format(pg_id),
                                         packet = self.pkt,
                                         flow_stats = STLFlowStats(pg_id = pg_id),
                                         mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent)))

                exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})

            # add both streams to ports
            self.c.add_streams(streams, ports = [self.tx_port])

            self.__rx_iteration(exp)


        except STLError as e:
            assert False , '{0}'.format(e)

    def test_1_stream_many_iterations (self):
        """Run each packet flavor (flow-stat/latency, small/large/9k, IPv4/IPv6)
        as a single stream for 10 iterations, verifying counters each time."""
        total_pkts = self.total_pkts

        try:
            streams_data = [
                {'name': 'Flow stat. No latency', 'pkt': self.pkt, 'lat': False},
                {'name': 'Latency, no field engine', 'pkt': self.pkt, 'lat': True},
                {'name': 'Latency, short packet with field engine', 'pkt': self.vm_pkt, 'lat': True},
                {'name': 'Latency, large packet field engine', 'pkt': self.vm_large_pkt, 'lat': True}
            ]
            # Jumbo and IPv6 flavors only where the driver supports them (see setUp).
            if self.latency_9k_enable:
                streams_data.append({'name': 'Latency, 9k packet with field engine', 'pkt': self.vm_9k_pkt, 'lat': True})

            if self.ipv6_support:
                streams_data.append({'name': 'IPv6 flow stat. No latency', 'pkt': self.ipv6pkt, 'lat': False})
                streams_data.append({'name': 'IPv6 latency, no field engine', 'pkt': self.ipv6pkt, 'lat': True})

            streams = []
            for data in streams_data:
                # Latency streams are rate-limited by percentage; plain flow-stat
                # streams by fixed pps.
                if data['lat']:
                    flow_stats = STLFlowLatencyStats(pg_id = 5)
                    mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
                else:
                    flow_stats = STLFlowStats(pg_id = 5)
                    mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)

                s = STLStream(name = data['name'],
                              packet = data['pkt'],
                              flow_stats = flow_stats,
                              mode = mode
                              )
                streams.append(s)

            print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
            exp = {'pg_id': 5, 'total_pkts': total_pkts}

            for stream in streams:
                self.c.add_streams([stream], ports = [self.tx_port])
                print("Stream: {0}".format(stream.name))
                exp['pkt_len'] = stream.get_pkt_len()
                for i in range(0, 10):
                    print("Iteration {0}".format(i))
                    self.__rx_iteration( [exp] )
                self.c.remove_all_streams(ports = [self.tx_port])


        except STLError as e:
            assert False , '{0}'.format(e)



    def __9k_stream(self,pgid,ports,precet,max_latency,avg_latency,duration,pkt_size):
        """Run a latency stream next to a bulk jumbo stream on each port.

        A 1000-pps latency stream (pg_id = pgid+port) competes with a
        continuous bulk stream of ``pkt_size``-byte packets at ``precet``
        percent of line rate for ``duration`` seconds.  Returns 0 when all
        ports stay within the latency limits, ERROR_LATENCY_TOO_HIGH otherwise.
        """
        my_pg_id=pgid
        s_ports=ports;
        all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
        if ports == None:
            s_ports=all_ports
        assert( type(s_ports)==list)

        stream_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*pkt_size))

        try:
            # reset all ports
            self.c.reset(ports = all_ports)


            for pid in s_ports:
                s1 = STLStream(name = 'rx',
                               packet = self.pkt,
                               flow_stats = STLFlowLatencyStats(pg_id = my_pg_id+pid),
                               mode = STLTXCont(pps = 1000))

                s2 = STLStream(name = 'bulk',
                               packet = stream_pkt,
                               mode = STLTXCont(percentage =precet))


                # add both streams to ports
                self.c.add_streams([s1,s2], ports = [pid])

            self.c.clear_stats()

            self.c.start(ports = s_ports,duration = duration)
            self.c.wait_on_traffic(ports = s_ports,timeout = duration+10,rx_delay_ms = 100)
            stats = self.c.get_stats()

            for pid in s_ports:
                latency_stats = stats['latency'].get(my_pg_id+pid)
                #pprint.pprint(latency_stats)
                if self.__verify_latency (latency_stats,max_latency,avg_latency) !=0:
                    return (ERROR_LATENCY_TOO_HIGH);

            return 0

        except STLError as e:
            assert False , '{0}'.format(e)





    # check low latency when you have stream of 9K stream
    def test_9k_stream(self):
        """Randomized jumbo-frame latency soak: 5 iterations of random
        duration/pg_id/packet-size/port-set, each retried up to 5 times
        before latency is declared too high."""
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        if self.latency_9k_enable == False:
            print("SKIP")
            return

        for i in range(0,5):
            print("Iteration {0}".format(i));
            duration=random.randint(10, 70);
            pgid=random.randint(1, 65000);
            pkt_size=random.randint(1000, 9000);
            all_ports = list(CTRexScenario.stl_ports_map['map'].keys());


            s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
            s_port=sorted(s_port)
            if self.speed == 40 :
                # the NIC does not support all full rate in case both port works let's filter odd ports
                s_port=list(filter(lambda x: x % 2==0, s_port))
                if len(s_port)==0:
                    s_port=[0];

            # Retry: latency can spike transiently; any clean run within 5 tries passes.
            error=1;
            for j in range(0,5):
                print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
                if self.__9k_stream(pgid,
                                    s_port,90,
                                    self.latency_9k_max_latency,
                                    self.latency_9k_max_average,
                                    duration,
                                    pkt_size)==0:
                    error=0;
                    break;

            if error:
                assert False , "Latency too high"
            else:
                print("===>Iteration {0} PASS {1}".format(i,j));


    def check_stats (self,stats,a,b,err):
        """Assert a == b; on mismatch dump the full stats dict and fail with
        the field name ``err`` in the message."""
        if a != b:
            tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b)
            pprint.pprint(stats)
            assert False,tmp



    def send_1_burst(self,from_port,is_latency,pkts):
        """Send one burst of minimum-size (60B + padding) packets from
        ``from_port`` to its mapped peer and cross-check every byte/packet
        counter (per-port, totals, and — when ``is_latency`` — per-stream).

        Byte counters are checked against pkts*64 (60B frame + 4B FCS).
        """
        pid = from_port
        base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)

        # Pad to the 60-byte minimum Ethernet frame (before FCS).
        pad = (60 - len(base_pkt)) * 'x'

        stream_pkt = STLPktBuilder(pkt = base_pkt/pad)

        all_ports=list(CTRexScenario.stl_ports_map['map'].keys());

        # Destination (peer) port for the source port, per the port map.
        dpid = CTRexScenario.stl_ports_map['map'][pid]

        s_ports =[pid]

        try:
            # reset all ports
            self.c.reset(ports = all_ports)


            for pid in s_ports:
                if is_latency:
                    s1 = STLStream(name = 'rx',
                                   packet = stream_pkt,
                                   flow_stats = STLFlowLatencyStats(pg_id = 5 + pid),
                                   mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))
                else:
                    s1 = STLStream(name = 'rx',
                                   packet = stream_pkt,
                                   mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000))


                # add both streams to ports
                self.c.add_streams(s1, ports = [pid])

            self.c.clear_stats()

            self.c.start(ports = s_ports)
            self.c.wait_on_traffic(ports = s_ports)

            stats = self.c.get_stats()

            ips = stats[dpid]
            ops = stats[pid]
            tps = stats['total']
            # 64 = 60-byte frame + 4-byte FCS counted by the NIC.
            tbytes = pkts*64

            self.check_stats (stats,ops["obytes"], tbytes,"ops[obytes]")
            self.check_stats (stats,ops["opackets"], pkts,"ops[opackets]")

            self.check_stats (stats,ips["ibytes"], tbytes,"ips[ibytes]")
            self.check_stats (stats,ips["ipackets"], pkts,"ips[ipackets]")

            self.check_stats (stats,tps['ibytes'], tbytes,"tps[ibytes]")
            self.check_stats (stats,tps['obytes'], tbytes,"tps[obytes]")
            self.check_stats (stats,tps['ipackets'], pkts,"tps[ipackets]")
            self.check_stats (stats,tps['opackets'], pkts,"tps[opackets]")

            if is_latency:
                # Per-stream (pg_id) counters for the latency stream.
                ls=stats['flow_stats'][5+ pid]
                self.check_stats (stats,ls['rx_pkts']['total'], pkts,"ls['rx_pkts']['total']")
                self.check_stats (stats,ls['rx_pkts'][dpid], pkts,"ls['rx_pkts'][dpid]")

                self.check_stats (stats,ls['tx_pkts']['total'], pkts,"ls['tx_pkts']['total']")
                self.check_stats (stats,ls['tx_pkts'][pid], pkts,"ls['tx_pkts'][pid]")

                self.check_stats (stats,ls['tx_bytes']['total'], tbytes,"ls['tx_bytes']['total']")
                self.check_stats (stats,ls['tx_bytes'][pid], tbytes,"ls['tx_bytes'][pid]")


            return 0

        except STLError as e:
            assert False , '{0}'.format(e)



    def test_fcs_stream(self):
        """ this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes"""

        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        # Exercise every port, with and without latency stats.
        all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
        for port in all_ports:
            for l in [True,False]:
                print(" test port {0} latency : {1} ".format(port,l))
                self.send_1_burst(port,l,100)


    # this test adds more and more latency streams and re-test with incremental
    def test_incremental_latency_streams (self):
        """Add latency streams one at a time (alternating small/large packets)
        until the aggregate rate exceeds the driver limit — at which point
        drops become expected — then rerun with few streams to confirm recovery."""
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')

        total_pkts = self.total_pkts
        percent = 0.5

        try:
            # We run till maximum streams allowed. At some point, expecting drops, because rate is too high.
            # then run with less streams again, to see that system is still working.
            for num_iter in [128, 5]:
                exp = []
                for i in range(1, num_iter):
                    # mix small and large packets
                    if i % 2 != 0:
                        my_pkt = self.pkt
                    else:
                        my_pkt = self.large_pkt
                    s1 = STLStream(name = 'rx',
                                   packet = my_pkt,
                                   flow_stats = STLFlowLatencyStats(pg_id = i),
                                   mode = STLTXSingleBurst(total_pkts = total_pkts,
                                                           percentage = percent
                                                           ))

                    # add both streams to ports
                    self.c.add_streams([s1], ports = [self.tx_port])
                    # Once the aggregate requested rate passes the latency rate
                    # budget, drops are legitimate and verification relaxes.
                    total_percent = i * percent
                    if total_percent > self.rate_lat:
                        self.drops_expected = True
                    else:
                        self.drops_expected = False

                    print("port {0} : {1} streams at {2}% of line rate\n".format(self.tx_port, i, total_percent))

                    exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()})

                    self.__rx_iteration( exp )

                self.c.remove_all_streams(ports = [self.tx_port])

        except STLError as e:
            assert False , '{0}'.format(e)
diff --git a/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
new file mode 100755
index 00000000..14ef36f7
--- /dev/null
+++ b/scripts/automation/regression/stateless_tests/trex_client_pkg_test.py
@@ -0,0 +1,39 @@
+#!/router/bin/python
+from .stl_general_test import CStlGeneral_Test, CTRexScenario
+from misc_methods import run_command
+from nose.plugins.attrib import attr
+
@attr('client_package')
class CTRexClientPKG_Test(CStlGeneral_Test):
    """This class tests TRex client package"""

    def setUp(self):
        """Disconnect the shared client and unpack the client package:
        the packaged examples establish their own connection."""
        CStlGeneral_Test.setUp(self)
        # examples connect by their own
        if CTRexScenario.stl_trex.is_connected():
            CTRexScenario.stl_trex.disconnect()
        CStlGeneral_Test.unzip_client_package()

    def tearDown(self):
        """Restore the shared client connection for subsequent test classes."""
        # connect back at end of tests
        if not CTRexScenario.stl_trex.is_connected():
            CTRexScenario.stl_trex.connect()
        CStlGeneral_Test.tearDown(self)

    def run_client_package_stl_example(self, python_version):
        """Run the packaged stl_imix.py example under the given interpreter
        ('python2'/'python3', resolved via find_python.sh) and fail the test
        on a non-zero exit code."""
        commands = [
            'cd %s' % CTRexScenario.scripts_path,
            'source find_python.sh --%s' % python_version,
            'which $PYTHON',
            'cd trex_client/stl/examples',
            '$PYTHON stl_imix.py -s %s' % self.configuration.trex['trex_name'],
        ]
        # 'bash -ce': stop at the first failing command and report its code.
        return_code, stdout, stderr = run_command("bash -ce '%s'" % '; '.join(commands))
        if return_code:
            self.fail('Error in running stf_example using %s: %s' % (python_version, [return_code, stdout, stderr]))

    def test_client_python2(self):
        self.run_client_package_stl_example(python_version = 'python2')

    def test_client_python3(self):
        self.run_client_package_stl_example(python_version = 'python3')