From 8bd778d02fc0260b109bb4bb8fc11e3567e9130d Mon Sep 17 00:00:00 2001 From: Yaroslav Brustinov Date: Mon, 6 Mar 2017 23:16:59 +0200 Subject: * add trex22 (Mellanox VF) and trex23(XL710 VF) regression configs * fix test_fcs_stream: it did not fail on virtual NICs at all now accumulating all the errors instread of failing on first one * fix XL710 VF input counters (+4 bytes) Change-Id: I7661b4e14a037e5ee2a9c1bc116d835c9a16df91 Signed-off-by: Yaroslav Brustinov --- .../regression/setups/trex22/benchmark.yaml | 155 +++++++++++++++++++++ .../regression/setups/trex22/config.yaml | 9 ++ .../regression/setups/trex23/benchmark.yaml | 155 +++++++++++++++++++++ .../regression/setups/trex23/config.yaml | 9 ++ .../regression/stateful_tests/trex_general_test.py | 1 + .../regression/stateful_tests/trex_rx_test.py | 2 +- .../stateless_tests/stl_benchmark_test.py | 2 +- .../regression/stateless_tests/stl_client_test.py | 21 +-- .../regression/stateless_tests/stl_rx_test.py | 137 +++++++++--------- 9 files changed, 410 insertions(+), 81 deletions(-) create mode 100644 scripts/automation/regression/setups/trex22/benchmark.yaml create mode 100644 scripts/automation/regression/setups/trex22/config.yaml create mode 100644 scripts/automation/regression/setups/trex23/benchmark.yaml create mode 100644 scripts/automation/regression/setups/trex23/config.yaml diff --git a/scripts/automation/regression/setups/trex22/benchmark.yaml b/scripts/automation/regression/setups/trex22/benchmark.yaml new file mode 100644 index 00000000..b366b3fb --- /dev/null +++ b/scripts/automation/regression/setups/trex22/benchmark.yaml @@ -0,0 +1,155 @@ +################################################################ +#### TRex benchmark configuration file #### +################################################################ + +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 106.652 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 11.577 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 2.030 + + +test_static_routing_imix_asymmetric: + multiplier : 0.8 + cores : 1 + bw_per_core : 13.742 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not 
enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 4, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex22/config.yaml b/scripts/automation/regression/setups/trex22/config.yaml new file mode 100644 index 00000000..6885db7e --- /dev/null +++ b/scripts/automation/regression/setups/trex22/config.yaml @@ -0,0 +1,9 @@ +################################################################ +#### TRex configuration file #### +################################################################ + +trex: + hostname : csi-trex-22 + cores : 1 + modes : [VM, loopback, vf_nics] + diff --git a/scripts/automation/regression/setups/trex23/benchmark.yaml b/scripts/automation/regression/setups/trex23/benchmark.yaml new file mode 100644 index 00000000..b366b3fb --- /dev/null +++ b/scripts/automation/regression/setups/trex23/benchmark.yaml @@ -0,0 +1,155 @@ +################################################################ +#### TRex benchmark configuration file #### +################################################################ + +### stateful ### + +test_jumbo: + multiplier : 2.8 + cores : 1 + bw_per_core : 106.652 + + +test_routing_imix: + multiplier : 0.5 + cores : 1 + bw_per_core : 11.577 + + +test_routing_imix_64: + multiplier : 28 + cores : 1 + bw_per_core : 2.030 + + +test_static_routing_imix_asymmetric: + multiplier : 0.8 + cores : 1 + bw_per_core : 13.742 + + + +### stateless ### + +test_CPU_benchmark: + profiles: + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 64, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# causes queue full +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 64, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : 
stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 10} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_for_benchmarks.py + kwargs : {packet_len: 9000, stream_count: 100} + cpu_util : 1 + bw_per_core : 1 + +# not enough memory + queue full if memory increase +# - name : stl/udp_for_benchmarks.py +# kwargs : {packet_len: 9000, stream_count: 1000} +# cpu_util : 1 +# bw_per_core : 1 + + - name : stl/imix.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 64} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 128} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 256} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 512} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 1500} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 4000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_1pkt_tuple_gen.py + kwargs : {packet_len: 9000} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/pcap.py + kwargs : {ipg_usec: 4, loop_count: 0} + cpu_util : 1 + bw_per_core : 1 + + - name : stl/udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + - name : stl/hlt/hlt_udp_rand_len_9k.py + cpu_util : 1 + bw_per_core : 1 + + diff --git a/scripts/automation/regression/setups/trex23/config.yaml b/scripts/automation/regression/setups/trex23/config.yaml new file mode 100644 index 00000000..eb8ecd86 --- /dev/null +++ b/scripts/automation/regression/setups/trex23/config.yaml @@ -0,0 +1,9 @@ +################################################################ +#### TRex test configuration file #### +################################################################ + +trex: + hostname : csi-trex-23 + cores : 1 + modes : [loopback, VM, vf_nics] + diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py index 1f52de07..b4da27f8 100755 --- a/scripts/automation/regression/stateful_tests/trex_general_test.py +++ b/scripts/automation/regression/stateful_tests/trex_general_test.py @@ -70,6 +70,7 @@ class CTRexGeneral_Test(unittest.TestCase): self.unsupported_modes = [] self.is_loopback = True if 'loopback' in self.modes else False self.is_virt_nics = True if 'virt_nics' in self.modes else False + self.is_vf_nics = True if 'vf_nics' in self.modes else False self.is_VM = True if 'VM' in self.modes else False if not CTRexScenario.is_init: diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py index d2050703..7af7f366 100755 --- a/scripts/automation/regression/stateful_tests/trex_rx_test.py +++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py @@ -12,7 +12,7 @@ class CTRexRx_Test(CTRexGeneral_Test): """This class defines the rx testcase of the TRex traffic generator""" def __init__(self, *args, **kwargs): CTRexGeneral_Test.__init__(self, *args, **kwargs) - self.unsupported_modes = ['virt_nics'] # TODO: fix + self.unsupported_modes = ['virt_nics', 'vf_nics'] # TODO: fix (-k argument does not work) def setUp(self): CTRexGeneral_Test.setUp(self) diff --git a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py 
b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py index fbc58765..e90ad7d9 100755 --- a/scripts/automation/regression/stateless_tests/stl_benchmark_test.py +++ b/scripts/automation/regression/stateless_tests/stl_benchmark_test.py @@ -38,7 +38,7 @@ class STLBenchmark_Test(CStlGeneral_Test): self.stl_trex.clear_stats() sleep(1) self.stl_trex.add_streams(profile) - mult = '1%' if self.is_virt_nics else '10%' + mult = '1%' if (self.is_virt_nics or self.is_vf_nics) else '10%' self.stl_trex.start(mult = mult) start_time = time() diff --git a/scripts/automation/regression/stateless_tests/stl_client_test.py b/scripts/automation/regression/stateless_tests/stl_client_test.py index 73dac734..eb046e38 100644 --- a/scripts/automation/regression/stateless_tests/stl_client_test.py +++ b/scripts/automation/regression/stateless_tests/stl_client_test.py @@ -29,7 +29,7 @@ class STLClient_Test(CStlGeneral_Test): self.pps = 50000 # strict mode is only for 'wire only' connection - self.strict = True if self.is_loopback and not self.is_virt_nics else False + self.strict = True if (self.is_loopback and not self.is_virt_nics) else False assert 'bi' in CTRexScenario.stl_ports_map @@ -269,14 +269,17 @@ class STLClient_Test(CStlGeneral_Test): # but virtual NICs does not support promiscuous mode self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = False) - if p1.has_custom_mac_addr(): - if not self.is_virt_nics: - self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True) - else: + if p1.has_custom_mac_addr() or p2.has_custom_mac_addr(): + if self.is_virt_nics: print("\n*** profile needs promiscuous mode but running on virtual NICs - skipping... ***\n") continue + elif self.is_vf_nics: + print("\n*** profile needs promiscuous mode but running on VF - skipping... ***\n") + continue + else: + self.c.set_port_attr(ports = [self.tx_port, self.rx_port], promiscuous = True) - if p1.has_flow_stats(): + if p1.has_flow_stats() or p2.has_flow_stats(): print("\n*** profile needs RX caps - skipping... 
***\n") continue @@ -286,14 +289,14 @@ class STLClient_Test(CStlGeneral_Test): self.c.clear_stats() self.c.start(ports = [self.tx_port, self.rx_port], mult = default_mult) - time.sleep(100 / 1000.0) + time.sleep(0.1) if p1.is_pauseable() and p2.is_pauseable(): self.c.pause(ports = [self.tx_port, self.rx_port]) - time.sleep(100 / 1000.0) + time.sleep(0.1) self.c.resume(ports = [self.tx_port, self.rx_port]) - time.sleep(100 / 1000.0) + time.sleep(0.1) self.c.stop(ports = [self.tx_port, self.rx_port]) diff --git a/scripts/automation/regression/stateless_tests/stl_rx_test.py b/scripts/automation/regression/stateless_tests/stl_rx_test.py index 95896d44..abec9402 100644 --- a/scripts/automation/regression/stateless_tests/stl_rx_test.py +++ b/scripts/automation/regression/stateless_tests/stl_rx_test.py @@ -69,7 +69,7 @@ class STLRX_Test(CStlGeneral_Test): 'rate_percent': 80, 'total_pkts': 1000, 'rate_latency': 1, - 'latency_9k_enable': True, + 'latency_9k_enable': False if self.is_vf_nics else True, 'latency_9k_max_average': 100, 'latency_9k_max_latency': 450, #see latency issue trex-261 }, @@ -138,6 +138,7 @@ class STLRX_Test(CStlGeneral_Test): , vm = vm) self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000) ,vm = vm) + self.errs = [] @classmethod @@ -456,121 +457,117 @@ class STLRX_Test(CStlGeneral_Test): print("===>Iteration {0} PASS {1}".format(i,j)); - def check_stats (self,stats,a,b,err): + def check_stats(self, a, b, err): if a != b: tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b) - pprint.pprint(stats) - assert False,tmp - - + print tmp + self.errs.append(tmp) - def send_1_burst(self,from_port,is_latency,pkts): - pid = from_port + def send_1_burst(self, client_ports, is_latency, pkts): + self.errs = [] base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025) - pad = (60 - len(base_pkt)) * 'x' - stream_pkt = STLPktBuilder(pkt = base_pkt/pad) - all_ports=list(CTRexScenario.stl_ports_map['map'].keys()); - - dpid = CTRexScenario.stl_ports_map['map'][pid] - - s_ports =[pid] - try: # reset all ports - self.c.reset(ports = all_ports) + self.c.reset() - for pid in s_ports: + for c_port in client_ports: if is_latency: s1 = STLStream(name = 'rx', packet = stream_pkt, - flow_stats = STLFlowLatencyStats(pg_id = 5 + pid), - mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000)) + flow_stats = STLFlowLatencyStats(pg_id = 5 + c_port), + mode = STLTXSingleBurst(total_pkts = pkts, pps = 1000)) else: s1 = STLStream(name = 'rx', packet = stream_pkt, - mode = STLTXSingleBurst(total_pkts = pkts,pps = 1000)) + mode = STLTXSingleBurst(total_pkts = pkts, pps = 1000)) # add both streams to ports - self.c.add_streams(s1, ports = [pid]) + self.c.add_streams(s1, ports = [c_port]) self.c.clear_stats() - self.c.start(ports = s_ports) - self.c.wait_on_traffic(ports = s_ports) - + self.c.start(ports = client_ports) + self.c.wait_on_traffic(ports = client_ports) stats = self.c.get_stats() - ips = stats[dpid] - ops = stats[pid] - tps = stats['total'] - tbytes = pkts*64 - - self.check_stats (stats,ops["obytes"], tbytes,"ops[obytes]") - self.check_stats (stats,ops["opackets"], pkts,"ops[opackets]") - - self.check_stats (stats,ips["ibytes"], tbytes,"ips[ibytes]") - self.check_stats (stats,ips["ipackets"], pkts,"ips[ipackets]") + bytes = pkts * 64 + total_pkts = pkts * len(client_ports) + total_bytes = total_pkts * 64 - self.check_stats (stats,tps['ibytes'], tbytes,"tps[ibytes]") - self.check_stats (stats,tps['obytes'], 
tbytes,"tps[obytes]") - self.check_stats (stats,tps['ipackets'], pkts,"tps[ipackets]") - self.check_stats (stats,tps['opackets'], pkts,"tps[opackets]") - - if is_latency: - ls=stats['flow_stats'][5+ pid] - self.check_stats (stats,ls['rx_pkts']['total'], pkts,"ls['rx_pkts']['total']") - self.check_stats (stats,ls['rx_pkts'][dpid], pkts,"ls['rx_pkts'][dpid]") + tps = stats['total'] + self.check_stats(tps['ibytes'], total_bytes, "tps[ibytes]") + self.check_stats(tps['obytes'], total_bytes, "tps[obytes]") + self.check_stats(tps['ipackets'], total_pkts, "tps[ipackets]") + self.check_stats(tps['opackets'], total_pkts, "tps[opackets]") - self.check_stats (stats,ls['tx_pkts']['total'], pkts,"ls['tx_pkts']['total']") - self.check_stats (stats,ls['tx_pkts'][pid], pkts,"ls['tx_pkts'][pid]") + for c_port in client_ports: + s_port = CTRexScenario.stl_ports_map['map'][c_port] - self.check_stats (stats,ls['tx_bytes']['total'], tbytes,"ls['tx_bytes']['total']") - self.check_stats (stats,ls['tx_bytes'][pid], tbytes,"ls['tx_bytes'][pid]") + ips = stats[s_port] + ops = stats[c_port] + self.check_stats(ops["obytes"], bytes, "stats[%s][obytes]" % c_port) + self.check_stats(ops["opackets"], pkts, "stats[%s][opackets]" % c_port) + + self.check_stats(ips["ibytes"], bytes, "stats[%s][ibytes]" % s_port) + self.check_stats(ips["ipackets"], pkts, "stats[%s][ipackets]" % s_port) - return 0 + if is_latency: + ls = stats['flow_stats'][5 + c_port] + self.check_stats(ls['rx_pkts']['total'], pkts, "ls['rx_pkts']['total']") + self.check_stats(ls['rx_pkts'][s_port], pkts, "ls['rx_pkts'][%s]" % s_port) + + self.check_stats(ls['tx_pkts']['total'], pkts, "ls['tx_pkts']['total']") + self.check_stats(ls['tx_pkts'][c_port], pkts, "ls['tx_pkts'][%s]" % c_port) + + self.check_stats(ls['tx_bytes']['total'], bytes, "ls['tx_bytes']['total']") + self.check_stats(ls['tx_bytes'][c_port], bytes, "ls['tx_bytes'][%s]" % c_port) + + if self.errs: + pprint.pprint(stats) + msg = 'Stats do not match the expected:\n' + '\n'.join(self.errs) + raise Exception(msg) except STLError as e: assert False , '{0}'.format(e) def _run_fcs_stream (self,is_vm): """ this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes""" - res=True try: - all_ports=list(CTRexScenario.stl_ports_map['map'].keys()); - for port in all_ports: - for l in [True,False]: - print(" test port {0} latency : {1} ".format(port,l)) - self.send_1_burst(port,l,100) + ports = CTRexScenario.stl_ports_map['map'].keys() + for lat in [True, False]: + print("\nSending from ports: {0}, has latency: {1} ".format(ports, lat)) + self.send_1_burst(ports, lat, 100) + print('Success.') + return True except Exception as e: if is_vm : - res=False + return False else: - raise e - return(res); - - + raise +# this test sends 1 64 byte packet with latency and check that all counters are reported as 64 bytes def test_fcs_stream(self): - """ this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes""" - is_vm=self.is_virt_nics # in case of VM and vSwitch there are drop of packets in some cases, let retry number of times - # in this case we just want to check functionality that packet of 64 is reported as 64 in all levels - retry=1 - if is_vm: - retry=4 - for i in range(0,retry): - if self._run_fcs_stream (is_vm): - break; - print("==> retry %d .." 
%(i));
+        # in case of VM and vSwitch, packets are sometimes dropped, so retry a number of times
+        # here we only want to verify that a 64-byte packet is reported as 64 bytes at all levels
+        is_vm = self.is_virt_nics or self.is_vf_nics
+        tries = 1
+        if is_vm:
+            tries = 4
+        for i in range(tries):
+            if self._run_fcs_stream(is_vm):
+                return
+            print("==> Try number #%d failed ..." % i)
+        self.fail('\n'.join(self.errs))
 
     # this test adds more and more latency streams and re-test with incremental
     def test_incremental_latency_streams (self):
-- 
cgit 1.2.3-korg
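
For reference, a minimal standalone sketch of the error-accumulation pattern this patch introduces into check_stats/test_fcs_stream: counter mismatches are collected in a list and a single exception listing all of them is raised at the end, instead of asserting on the first bad field. The StatsChecker class and the made-up counter values below are illustrative only and are not part of the TRex API.

import pprint

class StatsChecker(object):
    """Collects counter mismatches instead of asserting on the first one."""

    def __init__(self):
        self.errs = []

    def check_stats(self, read, expected, field):
        # record the mismatch and keep going, so a single run reports every bad counter
        if read != expected:
            self.errs.append('ERROR field : {0}, read : {1} != expected : {2}'.format(field, read, expected))

    def verify(self, stats):
        # dump the raw stats and raise once with all accumulated errors
        if self.errs:
            pprint.pprint(stats)
            raise Exception('Stats do not match the expected:\n' + '\n'.join(self.errs))

if __name__ == '__main__':
    pkts, pkt_len = 100, 64
    stats = {'opackets': 100, 'obytes': 6400, 'ipackets': 99, 'ibytes': 6336}  # made-up numbers
    checker = StatsChecker()
    checker.check_stats(stats['opackets'], pkts, 'opackets')
    checker.check_stats(stats['obytes'], pkts * pkt_len, 'obytes')
    checker.check_stats(stats['ipackets'], pkts, 'ipackets')        # mismatch is recorded, not raised
    checker.check_stats(stats['ibytes'], pkts * pkt_len, 'ibytes')  # second mismatch also recorded
    checker.verify(stats)  # raises once, listing both failing counters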