summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorimarom <imarom@cisco.com>2016-12-13 20:02:45 +0200
committerimarom <imarom@cisco.com>2016-12-13 20:02:45 +0200
commitac1c2d18f699c40b974d314df3db835ec26bc0bd (patch)
tree44b73dacf0be5809817d5d675a2be8a6d85ce357
parent603bd7a4d9dcb8058812633ac60a75598578cb83 (diff)
parentfe755604ec04ed7f8622394f99f0048901dad4e1 (diff)
Merge branch 'master' into rx_phase_2
-rwxr-xr-xdoc/AnalyticsConnect.py182
-rwxr-xr-xdoc/AnalyticsWebReport.py9
-rw-r--r--doc/README17
-rwxr-xr-xdoc/TRexDataAnalysis.py63
-rwxr-xr-xdoc/trex_analytics.asciidoc26
-rwxr-xr-xdoc/trex_book.asciidoc131
-rw-r--r--doc/trex_faq.asciidoc37
-rw-r--r--doc/trex_index.asciidoc2
-rwxr-xr-xdoc/ws_main.py8
-rw-r--r--external_libs/ibverbs/include/infiniband/mlx5_hw.h1
-rwxr-xr-xlinux_dpdk/ws_main.py12
-rw-r--r--scripts/automation/regression/setups/trex09/benchmark.yaml2
-rw-r--r--scripts/automation/regression/setups/trex11/benchmark.yaml13
-rw-r--r--scripts/automation/regression/setups/trex14/BU/benchmark.yaml245
-rw-r--r--scripts/automation/regression/setups/trex14/BU/config.yaml67
-rw-r--r--scripts/automation/regression/setups/trex14/benchmark.yaml21
-rw-r--r--scripts/automation/regression/setups/trex14/config.yaml15
-rwxr-xr-xscripts/automation/regression/stateful_tests/trex_general_test.py13
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py34
-rw-r--r--scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py16
-rwxr-xr-xscripts/dpdk_setup_ports.py14
-rw-r--r--scripts/ko/3.10.0-327.el7.x86_64/igb_uio.kobin0 -> 236751 bytes
-rwxr-xr-xscripts/trex-cfg24
-rw-r--r--src/dpdk/drivers/net/enic/base/vnic_dev.c5
-rw-r--r--src/dpdk/drivers/net/enic/enic_clsf.c55
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_rxtx.c4
-rw-r--r--src/main_dpdk.cpp300
-rw-r--r--src/main_dpdk.h3
-rw-r--r--src/rpc-server/trex_rpc_jsonrpc_v2_parser.cpp9
-rw-r--r--src/rpc-server/trex_rpc_jsonrpc_v2_parser.h9
-rw-r--r--src/rpc-server/trex_rpc_req_resp_server.cpp24
-rw-r--r--src/rpc-server/trex_rpc_req_resp_server.h1
32 files changed, 1043 insertions, 319 deletions
diff --git a/doc/AnalyticsConnect.py b/doc/AnalyticsConnect.py
index 10619532..bb473c52 100755
--- a/doc/AnalyticsConnect.py
+++ b/doc/AnalyticsConnect.py
@@ -21,112 +21,117 @@ VIEW_ID = '120207451'
def initialize_analyticsreporting():
- """Initializes an analyticsreporting service object.
-
- Returns:
- analytics an authorized analyticsreporting service object.
- """
-
- credentials = ServiceAccountCredentials.from_p12_keyfile(
- SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
-
- http = credentials.authorize(httplib2.Http())
-
- # Build the service object.
- analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
-
- return analytics
-
-
-def get_report(analytics,start_date='2016-11-27',end_date='2016-11-27'):
- # Use the Analytics Service Object to query the Analytics Reporting API V4.
- return analytics.reports().batchGet(
- body={
- 'reportRequests': [
- {
- 'viewId': VIEW_ID,
- 'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
- 'metrics': [{'expression': 'ga:metric1','formattingType':'CURRENCY'},
- {'expression': 'ga:metric2','formattingType':'CURRENCY'},
- {'expression': 'ga:metric3','formattingType':'CURRENCY'},
- {'expression': 'ga:totalEvents'}],
- 'dimensions': [{"name":"ga:eventAction"},{"name": "ga:dimension1"},{"name": "ga:dimension2"},{"name": "ga:dimension3"},{"name": "ga:dimension4"}],
- 'pageSize': 10000
+ """Initializes an analyticsreporting service object.
+
+ Returns:
+ analytics an authorized analyticsreporting service object.
+ """
+
+ credentials = ServiceAccountCredentials.from_p12_keyfile(
+ SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
+
+ http = credentials.authorize(httplib2.Http())
+
+ # Build the service object.
+ analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
+
+ return analytics
+
+
+def get_report(analytics, start_date='2016-11-27', end_date='2016-11-27'):
+ # Use the Analytics Service Object to query the Analytics Reporting API V4.
+ return analytics.reports().batchGet(
+ body={
+ 'reportRequests': [
+ {
+ 'viewId': VIEW_ID,
+ 'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
+ 'metrics': [{'expression': 'ga:metric1', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:metric2', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:metric3', 'formattingType': 'CURRENCY'},
+ {'expression': 'ga:totalEvents'}],
+ 'dimensions': [{"name": "ga:eventAction"}, {"name": "ga:dimension1"}, {"name": "ga:dimension2"},
+ {"name": "ga:dimension3"},
+ {"name": "ga:date"}, {"name": "ga:hour"}, {"name": "ga:minute"}],
+ 'pageSize': 10000
+ }
+ ]
}
- ]
- }
- ).execute()
+ ).execute()
def print_response(response):
- """Parses and prints the Analytics Reporting API V4 response"""
-
- for report in response.get('reports', []):
- columnHeader = report.get('columnHeader', {})
- dimensionHeaders = columnHeader.get('dimensions', [])
- metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
- rows = report.get('data', {}).get('rows', [])
+ """Parses and prints the Analytics Reporting API V4 response"""
- for row in rows:
- dimensions = row.get('dimensions', [])
- dateRangeValues = row.get('metrics', [])
+ for report in response.get('reports', []):
+ columnHeader = report.get('columnHeader', {})
+ dimensionHeaders = columnHeader.get('dimensions', [])
+ metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
+ rows = report.get('data', {}).get('rows', [])
- for header, dimension in zip(dimensionHeaders, dimensions):
- print header + ': ' + dimension
+ for row in rows:
+ dimensions = row.get('dimensions', [])
+ dateRangeValues = row.get('metrics', [])
- for i, values in enumerate(dateRangeValues):
- print 'Date range (' + str(i) + ')'
- for metricHeader, value in zip(metricHeaders, values.get('values')):
- print metricHeader.get('name') + ': ' + value
+ for header, dimension in zip(dimensionHeaders, dimensions):
+ print header + ': ' + dimension
+ for i, values in enumerate(dateRangeValues):
+ print 'Date range (' + str(i) + ')'
+ for metricHeader, value in zip(metricHeaders, values.get('values')):
+ print metricHeader.get('name') + ': ' + value
def export_to_tuples(response):
- # counter = 0
- setups = set()
- df = {}
- for report in response.get('reports', []):
- rows = report.get('data', {}).get('rows', [])
- for row in rows:
- data = []
- dimensions = row.get('dimensions', [])
- # print 'this is dimensions'
- # print dimensions
- data.append(dimensions[1]) #test name
- data.append(dimensions[2]) # state
- # data.append(dimensions[3]) # setup
- data.append(dimensions[4]) # test_type
- dateRangeValues = row.get('metrics', [])
- value = dateRangeValues[0].get('values',[])[0] #MPPS
- golden_min = dateRangeValues[0].get('values',[])[1] #golden min
- golden_max = dateRangeValues[0].get('values',[])[2] #golden max
- data.append(value)
- data.append(golden_min)
- data.append(golden_max)
- if dimensions[3] in setups:
- if dimensions[1] in df[dimensions[3]]:
- df[dimensions[3]][dimensions[1]].append(tuple(data))
- else:
- df[dimensions[3]][dimensions[1]] = [tuple(data)]
- else:
- df[dimensions[3]] = {}
- df[dimensions[3]][dimensions[1]] = [tuple(data)]
- setups.add(dimensions[3])
- # print 'counter is: %d' % counter
- return df, setups
+ # counter = 0
+ setups = set()
+ df = {}
+ for report in response.get('reports', []):
+ rows = report.get('data', {}).get('rows', [])
+ for row in rows:
+ data = []
+ dimensions = row.get('dimensions', [])
+ # print 'this is dimensions'
+ # print dimensions
+ data.append(dimensions[1]) # test name
+ data.append(dimensions[2]) # state
+ # data.append(dimensions[3]) # setup
+ data.append(dimensions[4]) # date in YYYYMMDD format
+ data.append(dimensions[5]) # hour
+ data.append(dimensions[6]) # minute
+ dateRangeValues = row.get('metrics', [])
+ value = dateRangeValues[0].get('values', [])[0] # MPPS
+ golden_min = dateRangeValues[0].get('values', [])[1] # golden min
+ golden_max = dateRangeValues[0].get('values', [])[2] # golden max
+ data.append(value)
+ # counter += 1
+ data.append(golden_min)
+ data.append(golden_max)
+ data.append(dimensions[0]) # build id
+ if dimensions[3] in setups:
+ if dimensions[1] in df[dimensions[3]]:
+ df[dimensions[3]][dimensions[1]].append(tuple(data))
+ else:
+ df[dimensions[3]][dimensions[1]] = [tuple(data)]
+ else:
+ df[dimensions[3]] = {}
+ df[dimensions[3]][dimensions[1]] = [tuple(data)]
+ setups.add(dimensions[3])
+ # print 'counter is: %d' % counter
+ return df, setups
def main():
- analytics = initialize_analyticsreporting()
- response = get_report(analytics)
- df, setups = export_to_tuples(response)
- # pprint(df)
- return df,setups
+ analytics = initialize_analyticsreporting()
+ response = get_report(analytics)
+ df, setups = export_to_tuples(response)
+ # pprint(df)
+ return df, setups
+
if __name__ == '__main__':
main()
-
"""
response structure (when fetched with "export to tuples"):
@@ -188,4 +193,3 @@ response structure (when fetched with "export to tuples"):
"""
-
diff --git a/doc/AnalyticsWebReport.py b/doc/AnalyticsWebReport.py
index bd4a9a2b..1806cab9 100755
--- a/doc/AnalyticsWebReport.py
+++ b/doc/AnalyticsWebReport.py
@@ -6,7 +6,7 @@ import time
import datetime
-def main(verbose = False):
+def main(verbose=False, detailed_test_stats=''):
if verbose:
print('Retrieving data from Google Analytics')
analytics = ac.initialize_analyticsreporting()
@@ -18,10 +18,13 @@ def main(verbose = False):
dest_path = os.path.join(os.getcwd(), 'build', 'images')
if verbose:
print('Saving data to %s' % dest_path)
- tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path = dest_path,
- add_stats='yes')
+ if detailed_test_stats:
+ print('generating detailed table for test results')
+ tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path=dest_path,
+ add_stats='yes', detailed_test_stats=detailed_test_stats)
if verbose:
print('Done without errors.')
+
if __name__ == "__main__":
main()
diff --git a/doc/README b/doc/README
new file mode 100644
index 00000000..8c630ff0
--- /dev/null
+++ b/doc/README
@@ -0,0 +1,17 @@
+How to build the docs:
+
+You will need following packages installed:
+
+ * asciidoc
+ * dblatex
+ * source-highlight
+ * python-sphinx
+
+(apt install ...) for Ubuntu
+(yum install ...) for Fedora/CentOS
+
+Then,
+
+./b configure
+./b build
+
diff --git a/doc/TRexDataAnalysis.py b/doc/TRexDataAnalysis.py
index fb855a16..ed674262 100755
--- a/doc/TRexDataAnalysis.py
+++ b/doc/TRexDataAnalysis.py
@@ -2,34 +2,46 @@
import pandas as pd
import numpy as np
import matplotlib
+
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
+import time
-def generate_dframe_for_test(test_name, test_data):
+def generate_dframe_for_test(setup_name, test_name, test_data):
test_results = []
+ test_dates = []
+ test_build_ids = []
test_mins = set()
test_maxs = set()
for query in test_data:
- test_results.append(float(query[3]))
- test_mins.add(float(query[4]))
- test_maxs.add(float(query[5]))
+ test_results.append(float(query[5]))
+ date_formatted = time.strftime("%d-%m-%Y", time.strptime(query[2], "%Y%m%d"))
+ time_of_res = date_formatted + '-' + query[3] + ':' + query[4]
+ test_dates.append(time_of_res)
+ test_build_ids.append(query[8])
+ test_mins.add(float(query[6]))
+ test_maxs.add(float(query[7]))
df = pd.DataFrame({test_name: test_results})
+ df_detailed = pd.DataFrame({(test_name + ' Results'): test_results, (test_name + ' Date'): test_dates,
+ "Setup": ([setup_name] * len(test_results)), "Build Id": test_build_ids})
stats = tuple([float(df.mean()), min(test_mins), max(test_maxs)]) # stats = (avg_mpps,min,max)
- return df, stats
+ return df, stats, df_detailed
def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dict):
dframe_arr_trend = []
stats_arr = []
dframe_arr_latest = []
+ dframe_arr_detailed = []
test_names = setup_dict.keys()
for test in test_names:
- df, stats = generate_dframe_for_test(test, setup_dict[test])
+ df, stats, df_detailed = generate_dframe_for_test(setup_name, test, setup_dict[test])
+ dframe_arr_detailed.append(df_detailed)
dframe_arr_trend.append(df)
stats_arr.append(stats)
- df_latest = float(setup_dict[test][-1][3])
+ df_latest = float(setup_dict[test][-1][6])
dframe_arr_latest.append(df_latest)
dframe_arr_latest = pd.DataFrame({'Date': [date] * len(dframe_arr_latest),
'Setup': [setup_name],
@@ -38,7 +50,7 @@ def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dic
index=range(1, len(dframe_arr_latest) + 1))
stats_df = pd.DataFrame(stats_arr, index=setup_dict.keys(), columns=['Avg MPPS', 'Golden Min', 'Golden Max'])
stats_df.index.name = 'Test Name'
- return dframe_arr_trend, stats_df, dframe_arr_latest
+ return dframe_arr_trend, stats_df, dframe_arr_latest, dframe_arr_detailed
def create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show='no', save_path='',
@@ -78,20 +90,43 @@ def create_bar_plot_for_latest_runs_per_setup(dframe_all_tests_latest, setup_nam
plt.show()
-def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats=''):
- dframe_arr, stats_arr, dframe_latest_arr = generate_dframe_arr_and_stats_of_tests_per_setup(end_date, setup_name,
- setup_dict)
+def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats='',
+ detailed_test_stats=''):
+ dframe_arr, stats_arr, dframe_latest_arr, dframe_detailed = generate_dframe_arr_and_stats_of_tests_per_setup(
+ end_date, setup_name,
+ setup_dict)
+ if detailed_test_stats:
+ detailed_table = create_detailed_table(dframe_detailed, setup_name, save_path)
+ else:
+ detailed_table = []
create_bar_plot_for_latest_runs_per_setup(dframe_latest_arr, setup_name, show=show, save_path=save_path)
create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show, save_path)
if add_stats:
stats_arr = stats_arr.round(2)
stats_arr.to_csv(os.path.join(save_path, setup_name + '_trend_stats.csv'))
plt.close('all')
+ return detailed_table
+
+
+def create_detailed_table(dframe_arr_detailed, setup_name, save_path=''):
+ result = reduce(lambda x, y: pd.merge(x, y, on=('Build Id', 'Setup')), dframe_arr_detailed)
+ return result
-def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats=''):
+# WARNING: if the file _all_stats.csv already exists, this script deletes it, to prevent overflowing of data
+# since data is appended to the file
+def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats='', detailed_test_stats=''):
+ total_detailed_data = []
+ if detailed_test_stats:
+ if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
+ os.remove(os.path.join(save_path, '_detailed_table.csv'))
for setup_name in setup_names:
if setup_name == 'trex11':
continue
- create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date, show='no', save_path=save_path,
- add_stats=add_stats)
+ detailed_setup_data = create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date,
+ show='no', save_path=save_path,
+ add_stats=add_stats, detailed_test_stats=detailed_test_stats)
+ total_detailed_data.append(detailed_setup_data)
+ if detailed_test_stats:
+ total_detailed_dframe = pd.DataFrame().append(total_detailed_data)
+ total_detailed_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))
diff --git a/doc/trex_analytics.asciidoc b/doc/trex_analytics.asciidoc
index 35c3a3e4..5f4cbfe0 100755
--- a/doc/trex_analytics.asciidoc
+++ b/doc/trex_analytics.asciidoc
@@ -26,11 +26,9 @@ endif::backend-xhtml11[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
-| NICs: | 2 x Intel Corporation Ethernet Controller X710
-| OS: | Fedora 18
+| NICs: | 1 x 100G (2 interfaces) Mellanox ConnectX-4
+| OS: | CentOS 7
|=================
image:images/trex07_latest_test_runs.png[title="trex07 test runs",align="left",width={p_width}, link="images/trex07_latest_test_runs.png"]
@@ -52,10 +50,8 @@ include::build/images/trex07_trend_stats.csv[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
-| NICs: | 2 x Intel Corporation Ethernet Controller X710
+| NICs: | 2 x 40G (4 interfaces) Intel XL710
| OS: | Fedora 18
|=================
@@ -79,9 +75,7 @@ include::build/images/trex08_trend_stats.csv[]
.Setup Details
[options='header',halign='center',cols="1,5"]
|=================
-| Server: | UCSC-C240-M4SX
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM: | 65536 @ 2133 MHz
| NICs: | 2x10G (X710) (8 interfaces)
| OS: | Fedora 18
|=================
@@ -105,11 +99,12 @@ include::build/images/trex09_trend_stats.csv[]
//////////////////////////////////////////////////////////
== Setup: TRex11
.Setup Details
-[options='header',halign='center']
-|====================================================================================================================
-|Name |OS |NICs |Routers
-| trex11 | Fedora 18| 2x10G (X710) (8 interfaces), 1x10G (2 interfaces), 1x1G (4 interfaces) | Loopback on X710 + ASA 5520 + ASA 5512 + ASA 5585-ssp10
-|====================================================================================================================
+[options='header',halign='center',cols="1,5"]
+|=================
+| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
+| NICs: | 1x40G (2 interfaces) Cisco VIC
+| OS: | Ubuntu 14
+|=================
image:images/trex11_latest_test_runs.png[title="trex11 test runs",align="left",width={p_width}, link="images/trex11_latest_test_runs.png"]
@@ -132,8 +127,7 @@ include::build/images/trex11_trend_stats.csv[]
[options='header',halign='center',cols="1,5"]
|=================
| CPU: | 2 x Intel(R) Xeon(R) CPU E5-2650 0 @ 2.00GHz
-| RAM: | 31 Gib
-| NICs: | 2x10G (X710) (4 interfaces)
+| NICs: | 2x10G (4 interfaces) Intel 82599EB
| OS: | Fedora 18
|=================
diff --git a/doc/trex_book.asciidoc b/doc/trex_book.asciidoc
index 66b9d4a9..4129837f 100755
--- a/doc/trex_book.asciidoc
+++ b/doc/trex_book.asciidoc
@@ -2094,36 +2094,35 @@ sudo arp -s 172.168.0.100 <TRex side the NICs are not visible to ifconfig, run:
anchor:connectx_support[]
Mellanox ConnectX-4 adapter family supports 100/56/40/25/10 Gb/s Ethernet speeds.
-Its DPDK support is a bit different from Intel DPDK support, more information can be found link:http://dpdk.org/doc/guides/nics/mlx5.html[DPDK support].
-Intel NICs does not require kernel drivers (except dpdk igb_uio which already supported) while ConnectX-4 works on top of Infinibad API (verbs) and require a kernel modules/user space libs.
-This means that it is required to install OFED package to be able to work with the NIC.
-Installing OFED is the simplest way to make it work (trying to install part of the package can work too but didn't work for us).
-The advantage of this model that you can control it throw Linux driver (ethtol can still work, you will be able to ifconfig it).
+Its DPDK support is a bit different from Intel DPDK support, more information can be found link:http://dpdk.org/doc/guides/nics/mlx5.html[here].
+Intel NICs do not require additional kernel drivers (except for igb_uio which is already supported in most distributions). ConnectX-4 works on top of Infiniband API (verbs) and requires special kernel modules/user space libs.
+This means that it is required to install OFED package to be able to work with this NIC.
+Installing the full OFED package is the simplest way to make it work (trying to install part of the package can work too but didn't work for us).
+The advantage of this model is that you can control it using standard Linux tools (ethtool and ifconfig will work).
The disadvantage is the OFED dependency.
==== Installation
==== Install Linux
-The following distro were tested with TRex and OFED, others might work
+We tested the following distro with TRex and OFED. Others might work too.
-* Ubuntu 14.04.3 LTS (GNU/Linux 3.19.0-25-generic x86_64)
+* CentOS 7.2
-
-The following distro were tested and did *not* work for us
+Following distro was tested and did *not* work for us.
* Fedora 21 (3.17.4-301.fc21.x86_64)
+* Ubuntu 14.04.3 LTS (GNU/Linux 3.19.0-25-generic x86_64) -- crash when RSS was enabled link:https://trex-tgn.cisco.com/youtrack/issue/trex-294[MLX RSS issue]
==== Install OFED
-The information was taken from link:http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers[Install OFED]
+Information was taken from link:http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers[Install OFED]
-.Download 3.4-1 OFED tar for your distro
-link::http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers[download]
+* Download 3.4-2 OFED tar for your distro
[IMPORTANT]
=====================================
-it must be version *MLNX_OFED_LINUX-3.4-1*
+The version must be *MLNX_OFED_LINUX-3.4-2*
=====================================
[IMPORTANT]
@@ -2134,15 +2133,15 @@ Make sure you have an internet connection without firewalls for HTTPS/HTTP - req
.Verify md5
[source,bash]
----
-$md5sum MLNX_OFED_LINUX-3.4-1.0.0.0-ubuntu14.04-x86_64.tgz
-b3c17dc0ea64fd1f0892d7b7ba7e45f3 MLNX_OFED_LINUX-3.4-1.0.0.0-ubuntu14.04-x86_64.tgz
+$md5sum MLNX_OFED_LINUX-3.4-2.0.0.0-rhel7.2-x86_64.tgz
+58b9fb369d7c62cedbc855661a89a9fd MLNX_OFED_LINUX-3.4-2.0.0.0-rhel7.2-x86_64.tgz
----
.Open the tar
[source,bash]
----
-$tar -xvzf MLNX_OFED_LINUX-3.4-1.0.0.0-ubuntu14.04-x86_64.tgz
-$cd MLNX_OFED_LINUX-3.4-1.0.0.0-ubuntu14.04-x86_64
+$tar -xvzf MLNX_OFED_LINUX-3.4-2.0.0.0-rhel7.2-x86_64.tgz
+$cd MLNX_OFED_LINUX-3.4-2.0.0.0-rhel7.2-x86_64
----
.Run Install script
@@ -2424,20 +2423,79 @@ mlx5_0 port 1 ==> eth6 (Down)
mlx5_1 port 1 ==> eth7 (Down)
-----
+.find the ports
+[source,bash]
+-----
+
+ $sudo ./dpdk_setup_ports.py -t
+ +----+------+---------++---------------------------------------------
+ | ID | NUMA | PCI || Name | Driver |
+ +====+======+=========++===============================+===========+=
+ | 0 | 0 | 06:00.0 || VIC Ethernet NIC | enic |
+ +----+------+---------++-------------------------------+-----------+-
+ | 1 | 0 | 07:00.0 || VIC Ethernet NIC | enic |
+ +----+------+---------++-------------------------------+-----------+-
+ | 2 | 0 | 0a:00.0 || 82599ES 10-Gigabit SFI/SFP+ Ne| ixgbe |
+ +----+------+---------++-------------------------------+-----------+-
+ | 3 | 0 | 0a:00.1 || 82599ES 10-Gigabit SFI/SFP+ Ne| ixgbe |
+ +----+------+---------++-------------------------------+-----------+-
+ | 4 | 0 | 0d:00.0 || Device 15d0 | |
+ +----+------+---------++-------------------------------+-----------+-
+ | 5 | 0 | 10:00.0 || I350 Gigabit Network Connectio| igb |
+ +----+------+---------++-------------------------------+-----------+-
+ | 6 | 0 | 10:00.1 || I350 Gigabit Network Connectio| igb |
+ +----+------+---------++-------------------------------+-----------+-
+ | 7 | 1 | 85:00.0 || 82599ES 10-Gigabit SFI/SFP+ Ne| ixgbe |
+ +----+------+---------++-------------------------------+-----------+-
+ | 8 | 1 | 85:00.1 || 82599ES 10-Gigabit SFI/SFP+ Ne| ixgbe |
+ +----+------+---------++-------------------------------+-----------+-
+ | 9 | 1 | 87:00.0 || MT27700 Family [ConnectX-4] | mlx5_core | #<1>
+ +----+------+---------++-------------------------------+-----------+-
+ | 10 | 1 | 87:00.1 || MT27700 Family [ConnectX-4] | mlx5_core | #<2>
+ +----+------+---------++---------------------------------------------
+-----
+<1> ConnectX-4 port 0
+<2> ConnectX-4 port 1
+
+
+.Config file example
+[source,bash]
+-----
+### Config file generated by dpdk_setup_ports.py ###
+
+ - port_limit: 2
+ version: 2
+ interfaces: ['87:00.0', '87:00.1']
+ port_info:
+ - ip: 1.1.1.1
+ default_gw: 2.2.2.2
+ - ip: 2.2.2.2
+ default_gw: 1.1.1.1
+
+ platform:
+ master_thread_id: 0
+ latency_thread_id: 1
+ dual_if:
+ - socket: 1
+ threads: [8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31]
+-----
+
+
+
==== TRex specific implementation details
TRex uses flow director filter to steer specific packets to specific queues.
-To support that we change IPv4.TOS/Ipv6.TC LSB to *1* to be steered. So latency packets will have this bit turn on (not only for ConnectX-4)
-Watch out, In case DUT will clear this bit (change the TOS with LSB==0, e.g. 0x3->0x2) packets won't be forward to TRex.
+To support that, we change IPv4.TOS/Ipv6.TC LSB to *1* for packets we want to handle by software (Other packets will be dropped). So latency packets will have this bit turned on (This is true for all NIC types, not only for ConnectX-4).
+This means that if the DUT for some reason clears this bit (changes the TOS LSB to 0, e.g. from 0x3 to 0x2), some TRex features (latency measurement for example) will not work properly.
==== Which NIC to buy?
-NIC with two ports will work better from performance prospective, so it is better to have MCX456A-ECAT(two 100gb port) and *not* the MCX455A-ECAT (one 100gb port).
+NIC with two ports will work better from a performance perspective, so it is better to have the MCX456A-ECAT (dual 100gb ports) and *not* the MCX455A-ECAT (single 100gb port).
==== Limitation/Issues
-* Stateless per stream statistic is not supported yet
+* Stateless mode ``per stream statistics'' feature is handled in software (No hardware support like in X710 card).
* link:https://trex-tgn.cisco.com/youtrack/issue/trex-260[64B performance issue]
* link:https://trex-tgn.cisco.com/youtrack/issue/trex-261[Latency issue]
* link:https://trex-tgn.cisco.com/youtrack/issue/trex-262[Statful RX out of order]
@@ -2446,25 +2504,25 @@ NIC with two ports will work better from performance prospective, so it is bette
==== Performance Cycles/Packet ConnectX-4 vs Intel XL710
-For version TRex v2.11, this is the comparison results between XL710 and ConnectX-4 for various scenarios
+For TRex version v2.11, these are the comparison results between XL710 and ConnectX-4 for various scenarios.
-.Stateless MPPS/Core [Perlimeniary]
+.Stateless MPPS/Core [Preliminary]
image:images/xl710_vs_mlx5_64b.png[title="Stateless 64B"]
-.Stateless Gb/Core [Perlimeniary]
+.Stateless Gb/Core [Preliminary]
image:images/xl710_vs_mlx5_var_size.png[title="Stateless variable size packet"]
*Comments*::
-1. For Stateless 64B profiles ConnectX-4 cost 50-90% more cycles per packet (it is actually even more because there is the TRex scheduler overhead) - it means that in the worst case scenario you will need x2 CPU for the same total MPPS
-2. For Stateless/Stateful 256B profiles, ConnectX-4 cost half of the cycles per packets. ConnectX-4 probably can handle in a better way chained mbuf (scatter gather).
-3. In Average Stateful senario ConnectX-4 will be slightly better.
-4. MLX5 can reach ~90MPPS while XL710 limited to 35MPPS
+1. For Stateless 64B profiles, ConnectX-4 uses 50-90% more CPU cycles per packet (it is actually even more because there is the TRex scheduler overhead) - it means that in worst case scenario, you will need x2 CPU for the same total MPPS.
+2. For Stateless/Stateful 256B profiles, ConnectX-4 uses half of the CPU cycles per packet. ConnectX-4 probably can handle in a better way chained mbufs (scatter gather).
+3. In the average stateful scenario, ConnectX-4 is slightly better.
+4. MLX5 can reach ~90MPPS while XL710 is limited to 35MPPS.
[NOTE]
=====================================
-There is a task to automate the production of this reports
+There is a task to automate the production of these reports
=====================================
==== Troubleshooting
@@ -2478,11 +2536,11 @@ There is a task to automate the production of this reports
anchor:ciscovic_support[]
-* Supported from TRex version v2.11
-* Only 1300 series Cisco adapter
-* Firmware version 2.0(13) for UCS C-series servers, GA in Febuary 2017.
-* Firmware version 3.1(2) for blade servers supports more filtering capabilities.
-* The feature can be enabled via Cisco CIMC or USCM with the 'advanced filters' radio button. When enabled, the these additional flow director modes are available:
+* Supported from TRex version v2.12
+* Only 1300 series Cisco adapter supported
+* Must have VIC firmware version 2.0(13) for UCS C-series servers. Will be GA in February 2017.
+* Must have VIC firmware version 3.1(2) for blade servers (which supports more filtering capabilities).
+* The feature can be enabled via Cisco CIMC or USCM with the 'advanced filters' radio button. When enabled, these additional flow director modes are available:
RTE_ETH_FLOW_NONFRAG_IPV4_OTHER
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
RTE_ETH_FLOW_NONFRAG_IPV6_UDP
@@ -2491,10 +2549,9 @@ anchor:ciscovic_support[]
RTE_ETH_FLOW_NONFRAG_IPV6_OTHER
-==== Limitation/Issues
+==== Limitations/Issues
-* Stateless per stream statistic is not supported yet
-* link:https://trex-tgn.cisco.com/youtrack/issue/trex-265[flow-director]
+* Stateless mode ``per stream statistics'' feature is handled in software (No hardware support like in X710 card).
* link:https://trex-tgn.cisco.com/youtrack/issue/trex-272[QSFP+ issue]
diff --git a/doc/trex_faq.asciidoc b/doc/trex_faq.asciidoc
index 44f4f237..8d97aee1 100644
--- a/doc/trex_faq.asciidoc
+++ b/doc/trex_faq.asciidoc
@@ -77,7 +77,8 @@ Limitations:
2. We have regression tests in our lab for each recommended NIC. We do not claim to support NICs we do not have in our lab.
==== Is Cisco VIC supported?
-No. Currently its DPDK driver does not support the capabilities needed to run TRex.
+Yes. Since version 2.12, with link:trex_manual.html#_cisco_vic_support[these limitations]. Especially note that
+a new firmware version is needed.
==== Is 100Gbs NIC QSFP+ supported?
Not yet. Support for FM10K and Mellanox Connectx5 is under development.
@@ -234,6 +235,40 @@ Default maximum supported flows is 1M (From TRex prespective. DUT might have muc
To increase the number of supported active flows, you should add ``dp_flows'' arg in config file ``memory'' section.
Look link:trex_manual.html#_memory_section_configuration[here] for more info.
+.example of CFG file
+[source,bash]
+----
+
+ - port_limit : 4
+ version : 2
+ interfaces : ["02:00.0","02:00.1","84:00.0","84:00.1"] # list of the interfaces to bind run ./dpdk_nic_bind.py --status
+ memory :
+ dp_flows : 10048576 #<1>
+
+----
+<1> more flows 10Mflows
+
+==== Loading a big YAML file raises an error: not enough memory for specific pool 2048?
+
+You should increase the pool that raised the error, for example in case of 2048:
+
+.example of CFG file
+[source,bash]
+----
+
+ - port_limit : 4
+ version : 2
+ interfaces : ["02:00.0","02:00.1","84:00.0","84:00.1"] # list of the interfaces to bind run ./dpdk_nic_bind.py --status
+ memory :
+ traffic_mbuf_2048 : 8000 #<1>
+
+----
+<1> more mbufs of size 2048
+
+You can run TRex with `-v 7` to verify that the configuration has an effect
+
+
+
==== I want to have more active flows on the DUT, how can I do this?
After stretching TRex to its maximum CPS capacity, consider the following: DUT will have much more active flows in case of a UDP flow due to the nature of aging (DUT does not know when the flow ends while TRex knows).
In order to artificially increase the length of the active flows in TRex, you can configure a larger IPG in the YAML file. This will cause each flow to last longer. Alternatively, you can increase IPG in your PCAP file as well.
diff --git a/doc/trex_index.asciidoc b/doc/trex_index.asciidoc
index 5aa46364..28b04ae9 100644
--- a/doc/trex_index.asciidoc
+++ b/doc/trex_index.asciidoc
@@ -26,7 +26,7 @@ http://www.slideshare.net/HanochHaim/trex-realistic-traffic-generator-stateless-
[options="header",cols="<4,a"]
|=================
| Description | Name
-| FAQ |
+| Frequently Asked Questions |
link:trex_faq.html[FAQ]
| Installation Guide |
link:trex_manual.html#_download_and_installation[Installation]
diff --git a/doc/ws_main.py b/doc/ws_main.py
index 3fdc2be1..54975d02 100755
--- a/doc/ws_main.py
+++ b/doc/ws_main.py
@@ -251,6 +251,7 @@ def scansize(self):
def options(opt):
opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
opt.add_option('--performance', action='store_true', help='Build a performance report based on google analytics')
+ opt.add_option('--performance-detailed',action='store_true',help='print detailed test results (date,time, build id and results) to csv file named _detailed_table.csv.')
def configure(conf):
search_path = '~/.local/bin /usr/local/bin/ /usr/bin'
@@ -891,7 +892,10 @@ def build_cp(bld,dir,root,callback):
def create_analytic_report(task):
try:
import AnalyticsWebReport as analytics
- analytics.main(verbose = Logs.verbose)
+ if task.generator.bld.options.performance_detailed:
+ analytics.main(verbose = Logs.verbose,detailed_test_stats='yes')
+ else:
+ analytics.main(verbose = Logs.verbose)
except Exception as e:
raise Exception('Error importing or using AnalyticsWebReport script: %s' % e)
@@ -921,7 +925,7 @@ def build(bld):
bld(rule=my_copy, target=x)
bld.add_group()
- if bld.options.performance:
+ if bld.options.performance or bld.options.performance_detailed:
bld(rule=create_analytic_report)
bld.add_group()
bld(rule=convert_to_html_toc_book, source='trex_analytics.asciidoc waf.css', target='trex_analytics.html',scan=ascii_doc_scan);
diff --git a/external_libs/ibverbs/include/infiniband/mlx5_hw.h b/external_libs/ibverbs/include/infiniband/mlx5_hw.h
index c772f339..2ac217d0 100644
--- a/external_libs/ibverbs/include/infiniband/mlx5_hw.h
+++ b/external_libs/ibverbs/include/infiniband/mlx5_hw.h
@@ -31,7 +31,6 @@
#define MLX5_CQ_DB_REQ_NOT (0 << 24)
#define MLX5E_CQE_FORMAT_MASK 0xc
-
enum mlx5_alloc_type { MXM_MLX5_ALLOC_TYPE_DUMMY };
enum mlx5_rsc_type { MXM_MLX5_RSC_TYPE_DUMMY };
enum mlx5_db_method { MXM_MLX5_DB_TYPE_DUMMY };
diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py
index fa427fee..43762443 100755
--- a/linux_dpdk/ws_main.py
+++ b/linux_dpdk/ws_main.py
@@ -586,11 +586,12 @@ includes_path =''' ../src/pal/linux_dpdk/
../src/dpdk/lib/librte_ring/
''';
+
+dpdk_includes_verb_path =''
+
dpdk_includes_path =''' ../src/
../src/pal/linux_dpdk/
../src/pal/linux_dpdk/dpdk
- ../external_libs/ibverbs/include/
-
../src/dpdk/drivers/
../src/dpdk/drivers/net/
../src/dpdk/drivers/net/af_packet/
@@ -650,6 +651,8 @@ dpdk_includes_path =''' ../src/
''';
+
+
DPDK_FLAGS=['-D_GNU_SOURCE', '-DPF_DRIVER', '-DX722_SUPPORT', '-DX722_A0_SUPPORT', '-DVF_DRIVER', '-DINTEGRATED_VF'];
client_external_libs = [
@@ -808,10 +811,9 @@ def build_prog (bld, build_obj):
if not build_obj.isRelease ():
debug_file_list +=ef_src.file_list(top)
-
bld.objects(
features='c ',
- includes = dpdk_includes_path,
+ includes = dpdk_includes_path+dpdk_includes_verb_path,
cflags = (build_obj.get_c_flags()+DPDK_FLAGS ),
source = bp_dpdk.file_list(top),
@@ -841,6 +843,7 @@ def post_build(bld):
install_single_system(bld, exec_p, obj);
def build(bld):
+ global dpdk_includes_verb_path;
bld.add_pre_fun(pre_build)
bld.add_post_fun(post_build);
@@ -850,6 +853,7 @@ def build(bld):
bld.read_shlib(name='ibverbs')
else:
ibverbs_lib_path='external_libs/ibverbs/'
+ dpdk_includes_verb_path =' \n ../external_libs/ibverbs/include/ \n'
bld.read_shlib( name='ibverbs' , paths=[top+ibverbs_lib_path] )
check_ibverbs_deps(bld)
diff --git a/scripts/automation/regression/setups/trex09/benchmark.yaml b/scripts/automation/regression/setups/trex09/benchmark.yaml
index 1d2b57c8..9fece63f 100644
--- a/scripts/automation/regression/setups/trex09/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex09/benchmark.yaml
@@ -221,7 +221,7 @@ test_performance_vm_multi_cpus_cached:
core_count : 2
mult : "90%"
mpps_per_core_golden :
- min: 28.8
+ min: 26.8
max: 29.5
test_performance_syn_attack_multi_cpus:
diff --git a/scripts/automation/regression/setups/trex11/benchmark.yaml b/scripts/automation/regression/setups/trex11/benchmark.yaml
index a4969d2d..5ebcdd55 100644
--- a/scripts/automation/regression/setups/trex11/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex11/benchmark.yaml
@@ -40,6 +40,19 @@ test_rx_check_http: &rx_http
rx_sample_rate : 128
bw_per_core : 49.464
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 8
+ cores : 1
+ rx_sample_rate : 128
+ bw_per_core : 20.9
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 23.9
### stateless ###
diff --git a/scripts/automation/regression/setups/trex14/BU/benchmark.yaml b/scripts/automation/regression/setups/trex14/BU/benchmark.yaml
new file mode 100644
index 00000000..04f13e79
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/BU/benchmark.yaml
@@ -0,0 +1,245 @@
+###############################################################
+#### TRex benchmark configuration file ####
+###############################################################
+
+#### common templates ###
+
+stat_route_dict: &stat_route_dict
+ clients_start : 16.0.0.1
+ servers_start : 48.0.0.1
+ dual_port_mask : 1.0.0.0
+ client_destination_mask : 255.0.0.0
+ server_destination_mask : 255.0.0.0
+
+nat_dict: &nat_dict
+ clients_net_start : 16.0.0.0
+ client_acl_wildcard_mask : 0.0.0.255
+ dual_port_mask : 1.0.0.0
+ pool_start : 200.0.0.0
+ pool_netmask : 255.255.255.0
+
+
+### stateful ###
+
+test_jumbo:
+ multiplier : 17
+ cores : 1
+ bw_per_core : 543.232
+
+
+test_routing_imix:
+ multiplier : 10
+ cores : 1
+ bw_per_core : 34.128
+
+
+test_routing_imix_64:
+ multiplier : 430
+ cores : 1
+ bw_per_core : 5.893
+
+
+test_static_routing_imix: &test_static_routing_imix
+ stat_route_dict : *stat_route_dict
+ multiplier : 8
+ cores : 1
+ bw_per_core : 34.339
+
+test_static_routing_imix_asymmetric: *test_static_routing_imix
+
+
+test_ipv6_simple:
+ multiplier : 9
+ cores : 2
+ bw_per_core : 19.064
+
+
+test_nat_simple_mode1: &test_nat_simple
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+ multiplier : 6000
+ cores : 1
+ nat_opened : 500000
+ allow_timeout_dev : True
+ bw_per_core : 44.445
+
+test_nat_simple_mode2: *test_nat_simple
+
+test_nat_simple_mode3: *test_nat_simple
+
+test_nat_learning: *test_nat_simple
+
+
+test_nbar_simple:
+ multiplier : 7.5
+ cores : 2
+ bw_per_core : 17.174
+ nbar_classification:
+ http : 32.58
+ rtp-audio : 21.21
+ oracle_sqlnet : 11.41
+ exchange : 11.22
+ rtp : 11.2
+ citrix : 5.65
+ rtsp : 2.87
+ dns : 1.96
+ smtp : 0.57
+ pop3 : 0.37
+ ssl : 0.28
+ sctp : 0.13
+ sip : 0.09
+ unknown : 0.45
+
+
+test_rx_check_http: &rx_http
+ multiplier : 15000
+ cores : 1
+ rx_sample_rate : 16
+ bw_per_core : 39.560
+
+test_rx_check_http_ipv6:
+ << : *rx_http
+ bw_per_core : 49.237
+
+test_rx_check_http_negative_disabled:
+ << : *rx_http
+ stat_route_dict : *stat_route_dict
+ nat_dict : *nat_dict
+
+
+test_rx_check_sfr: &rx_sfr
+ multiplier : 10
+ cores : 3
+ rx_sample_rate : 16
+ bw_per_core : 16.082
+
+test_rx_check_sfr_ipv6:
+ << : *rx_sfr
+ bw_per_core : 19.198
+
+
+
+### stateless ###
+
+test_CPU_benchmark:
+ profiles:
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 64, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# causes queue full
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 64, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 10}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_for_benchmarks.py
+ kwargs : {packet_len: 9000, stream_count: 100}
+ cpu_util : 1
+ bw_per_core : 1
+
+# not enough memory + queue full if memory increase
+# - name : stl/udp_for_benchmarks.py
+# kwargs : {packet_len: 9000, stream_count: 1000}
+# cpu_util : 1
+# bw_per_core : 1
+
+ - name : stl/imix.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 64}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 128}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 256}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 512}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 1500}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 4000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_1pkt_tuple_gen.py
+ kwargs : {packet_len: 9000}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/pcap.py
+ kwargs : {ipg_usec: 2, loop_count: 0}
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+ - name : stl/hlt/hlt_udp_rand_len_9k.py
+ cpu_util : 1
+ bw_per_core : 1
+
+
diff --git a/scripts/automation/regression/setups/trex14/BU/config.yaml b/scripts/automation/regression/setups/trex14/BU/config.yaml
new file mode 100644
index 00000000..0fd6b70e
--- /dev/null
+++ b/scripts/automation/regression/setups/trex14/BU/config.yaml
@@ -0,0 +1,67 @@
+################################################################
+#### TRex nightly test configuration file ####
+################################################################
+
+
+### TRex configuration:
+# hostname - can be DNS name or IP for the TRex machine for ssh to the box
+# password - root password for TRex machine
+# is_dual - should the TRex inject with -p ?
+# version_path - path to the TRex version and executable
+# cores - how many cores should be used
+# latency - rate of latency packets injected by the TRex
+# modes - list of modes (tagging) of this setup (loopback etc.)
+# * loopback - Trex works via loopback. Router and TFTP configurations may be skipped.
+# * VM - Virtual OS (accept low CPU utilization in tests, latency can get spikes)
+# * virt_nics - NICs are virtual (VMXNET3 etc.)
+
+### Router configuration:
+# hostname - the router hostname as appears in ______# cli prefix
+# ip_address - the router's ip that can be used to communicate with
+# image - the desired image to be loaded as the router's running config
+# line_password - router password when accessed via Telnet
+# en_password - router password when changing to "enable" mode
+# interfaces - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly loaded into the router during the test.
+# The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname - the tftp hostname
+# ip_address - the tftp's ip address
+# images_path - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+ hostname : csi-trex-14
+ cores : 4
+ modes : []
+
+router:
+ model : ASR1001x
+ hostname : csi-asr-01
+ ip_address : 10.56.216.103
+ image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin
+ line_password : cisco
+ en_password : cisco
+ mgmt_interface : GigabitEthernet0
+ clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ intf_masking : 255.255.255.0
+ ipv6_mask : 64
+ interfaces :
+ - client :
+ name : Te0/0/0
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ server :
+ name : Te0/0/1
+ src_mac_addr : 0000.0001.0000
+ dest_mac_addr : 0000.0001.0000
+ vrf_name : null
+
+tftp:
+ hostname : ats-asr-srv-1
+ ip_address : 10.56.217.7
+ root_dir : /scratch/tftp/
+ images_path : /asr1001x/
diff --git a/scripts/automation/regression/setups/trex14/benchmark.yaml b/scripts/automation/regression/setups/trex14/benchmark.yaml
index 04f13e79..0dc340b0 100644
--- a/scripts/automation/regression/setups/trex14/benchmark.yaml
+++ b/scripts/automation/regression/setups/trex14/benchmark.yaml
@@ -75,20 +75,19 @@ test_nbar_simple:
cores : 2
bw_per_core : 17.174
nbar_classification:
- http : 32.58
- rtp-audio : 21.21
- oracle_sqlnet : 11.41
- exchange : 11.22
- rtp : 11.2
- citrix : 5.65
- rtsp : 2.87
- dns : 1.96
+ rtp : 32.57
+ http : 30.25
+ oracle_sqlnet : 11.23
+ exchange : 10.80
+ citrix : 5.62
+ rtsp : 2.84
+ dns : 1.95
smtp : 0.57
- pop3 : 0.37
- ssl : 0.28
+ pop3 : 0.36
+ ssl : 0.17
sctp : 0.13
sip : 0.09
- unknown : 0.45
+ unknown : 3.41
test_rx_check_http: &rx_http
diff --git a/scripts/automation/regression/setups/trex14/config.yaml b/scripts/automation/regression/setups/trex14/config.yaml
index 0fd6b70e..ffb61763 100644
--- a/scripts/automation/regression/setups/trex14/config.yaml
+++ b/scripts/automation/regression/setups/trex14/config.yaml
@@ -36,28 +36,27 @@
trex:
hostname : csi-trex-14
cores : 4
- modes : []
router:
model : ASR1001x
hostname : csi-asr-01
- ip_address : 10.56.216.103
- image : asr1001x-universalk9.03.17.00.S.156-1.S-std.SPA.bin
+ ip_address : 10.56.216.120
+ image : asr1001x-universalk9.03.13.02.S.154-3.S2-ext.SPA.bin
line_password : cisco
en_password : cisco
mgmt_interface : GigabitEthernet0
- clean_config : /Configurations/danklei/asr1001_TRex_clean_config.cfg
+ clean_config : clean_config.cfg
intf_masking : 255.255.255.0
ipv6_mask : 64
interfaces :
- client :
name : Te0/0/0
- src_mac_addr : 0000.0001.0000
- dest_mac_addr : 0000.0001.0000
+ src_mac_addr : 0000.0001.0002
+ dest_mac_addr : 0000.0001.0001
server :
name : Te0/0/1
- src_mac_addr : 0000.0001.0000
- dest_mac_addr : 0000.0001.0000
+ src_mac_addr : 0000.0002.0002
+ dest_mac_addr : 0000.0002.0001
vrf_name : null
tftp:
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
index 1843af00..fe38ed34 100755
--- a/scripts/automation/regression/stateful_tests/trex_general_test.py
+++ b/scripts/automation/regression/stateful_tests/trex_general_test.py
@@ -257,7 +257,7 @@ class CTRexGeneral_Test(unittest.TestCase):
allowed_latency = 1000
if max(trex_res.get_max_latency().values()) > allowed_latency:
self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
-
+
# check that avg latency does not exceed 1 msec
if self.is_VM:
allowed_latency = 9999999
@@ -266,6 +266,15 @@ class CTRexGeneral_Test(unittest.TestCase):
if max(trex_res.get_avg_latency().values()) > allowed_latency:
self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
+ ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+')
+ if not ports_names:
+ raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*')
+ for port_name in ports_names:
+ path = 'trex-latecny-v2.data.%s.hist.cnt' % port_name
+ lat_count = trex_res.get_last_value(path)
+ if lat_count == 0:
+ self.fail('LatencyError: Number of latency packets received on %s is 0' % port_name)
+
if not self.is_loopback:
# check router number of drops --> deliberately masked- need to be figured out!!!!!
pkt_drop_stats = self.router.get_drop_stats()
@@ -359,7 +368,7 @@ class CTRexGeneral_Test(unittest.TestCase):
print("Can't get TRex log:", e)
if len(self.fail_reasons):
sys.stdout.flush()
- raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons))
+ raise Exception('Test failed. Reasons:\n%s' % '\n'.join(self.fail_reasons))
sys.stdout.flush()
def check_for_trex_crash(self):
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
index 93a930e4..51e93f5a 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/trex_stl_jsonrpc_client.py
@@ -32,13 +32,35 @@ class BatchMessage(object):
id, msg = self.rpc_client.create_jsonrpc_v2(method_name, params, api_class, encode = False)
self.batch_list.append(msg)
- def invoke(self, block = False):
+ def invoke(self, block = False, chunk_size = 500000):
if not self.rpc_client.connected:
return RC_ERR("Not connected to server")
- msg = json.dumps(self.batch_list)
-
- return self.rpc_client.send_msg(msg)
+ if chunk_size:
+ response_batch = RC()
+ size = 0
+ new_batch = []
+ for msg in self.batch_list:
+ size += len(json.dumps(msg))
+ new_batch.append(msg)
+ if size > chunk_size:
+ batch_json = json.dumps(new_batch)
+ response = self.rpc_client.send_msg(batch_json)
+ if not response:
+ return response
+ response_batch.add(response)
+ size = 0
+ new_batch = []
+ if new_batch:
+ batch_json = json.dumps(new_batch)
+ response = self.rpc_client.send_msg(batch_json)
+ if not response:
+ return response
+ response_batch.add(response)
+ return response_batch
+ else:
+ batch_json = json.dumps(self.batch_list)
+ return self.rpc_client.send_msg(batch_json)
# JSON RPC v2.0 client
@@ -130,13 +152,13 @@ class JsonRpcClient(object):
if self.zipper.check_threshold(buffer):
response = self.send_raw_msg(self.zipper.compress(buffer))
- if response:
- response = self.zipper.decompress(response)
else:
response = self.send_raw_msg(buffer)
if not response:
return response
+ elif self.zipper.is_compressed(response):
+ response = self.zipper.decompress(response)
# return to string
response = response.decode()
diff --git a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
index 397ada16..a2a47927 100644
--- a/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
+++ b/scripts/automation/trex_control_plane/stl/trex_stl_lib/utils/zipmsg.py
@@ -6,7 +6,7 @@ class ZippedMsg:
MSG_COMPRESS_THRESHOLD = 256
MSG_COMPRESS_HEADER_MAGIC = 0xABE85CEA
- def check_threshold (self, msg):
+ def check_threshold(self, msg):
return len(msg) >= self.MSG_COMPRESS_THRESHOLD
def compress (self, msg):
@@ -16,7 +16,7 @@ class ZippedMsg:
return new_msg
- def decompress (self, msg):
+ def decompress(self, msg):
if len(msg) < 8:
return None
@@ -30,3 +30,15 @@ class ZippedMsg:
return x
+
+ def is_compressed(self, msg):
+ if len(msg) < 8:
+ return False
+
+ t = struct.unpack(">II", msg[:8])
+ if (t[0] != self.MSG_COMPRESS_HEADER_MAGIC):
+ return False
+
+ return True
+
+
diff --git a/scripts/dpdk_setup_ports.py b/scripts/dpdk_setup_ports.py
index 8475bdee..ce6d2b2f 100755
--- a/scripts/dpdk_setup_ports.py
+++ b/scripts/dpdk_setup_ports.py
@@ -324,7 +324,11 @@ Other network devices
if obj:
return int(obj.group(1));
else:
- return -1
+ obj=re.search(r'mtu (\d+)',out,flags=re.MULTILINE|re.DOTALL);
+ if obj:
+ return int(obj.group(1));
+ else:
+ return -1
def set_mtu_mlx5 (self,dev_id,new_mtu):
if len(dev_id)>0:
@@ -352,7 +356,9 @@ Other network devices
def check_ofe_version (self):
ofed_info='/usr/bin/ofed_info'
- ofed_ver= 'MLNX_OFED_LINUX-3.4-1.0.0.0'
+ ofed_ver= '-3.4-'
+ ofed_ver_show= '3.4-1'
+
if not os.path.isfile(ofed_info):
print("OFED %s is not installed on this setup" % ofed_info)
@@ -368,7 +374,7 @@ Other network devices
if len(lines)>1:
if not (ofed_ver in str(lines[0])):
- print("installed OFED version is '%s' should be '%s' " % (lines[0],ofed_ver))
+ print("installed OFED version is '%s' should be at least '%s' and up" % (lines[0],ofed_ver_show))
exit(-1);
@@ -989,6 +995,8 @@ def main ():
print(e)
exit(-1)
+
+
if __name__ == '__main__':
main()
diff --git a/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko b/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko
new file mode 100644
index 00000000..a85b9add
--- /dev/null
+++ b/scripts/ko/3.10.0-327.el7.x86_64/igb_uio.ko
Binary files differ
diff --git a/scripts/trex-cfg b/scripts/trex-cfg
index 714aea6c..c6f12a7e 100755
--- a/scripts/trex-cfg
+++ b/scripts/trex-cfg
@@ -55,13 +55,23 @@ if ! lsmod | grep -q igb_uio ; then
fi
else
echo "ERROR: We don't have precompiled igb_uio.ko module for your kernel version"
- echo "You can try compiling yourself, using the following commands:"
- echo "\$cd ko/src "
- echo "\$make "
- echo "\$make install "
- echo "\$cd - "
- echo "Then try to run Trex again"
- exit 1
+ echo Will try compiling automatically.
+ {
+ cd ko/src &&
+ make &&
+ make install &&
+ cd -
+ } &> /dev/null || {
+ echo Automatic compilation failed.
+ echo "You can try compiling yourself, using the following commands:"
+ echo "\$cd ko/src "
+ echo "\$make "
+ echo "\$make install "
+ echo "\$cd - "
+ echo "Then try to run TRex again"
+ exit 1
+ }
+ echo Success.
fi
fi
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.c b/src/dpdk/drivers/net/enic/base/vnic_dev.c
index 713b6089..e50b90e7 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.c
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.c
@@ -667,7 +667,12 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
+#define TREX_PATCH
+#ifdef TREX_PATCH
+ err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
+#else
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
+#endif
if (err)
pr_err("Can't set packet filter\n");
diff --git a/src/dpdk/drivers/net/enic/enic_clsf.c b/src/dpdk/drivers/net/enic/enic_clsf.c
index 23cb0124..8f68faab 100644
--- a/src/dpdk/drivers/net/enic/enic_clsf.c
+++ b/src/dpdk/drivers/net/enic/enic_clsf.c
@@ -107,6 +107,7 @@ enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
memcpy(gp->layer[layer].val, val, len);
}
+
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
* without advanced filter support.
*/
@@ -132,6 +133,28 @@ copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}
+#define TREX_PATCH
+#ifdef TREX_PATCH
+void
+copy_fltr_recv_all(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks) {
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ memset(gp, 0, sizeof(*gp));
+
+ struct ether_hdr eth_mask, eth_val;
+ memset(&eth_mask, 0, sizeof(eth_mask));
+ memset(&eth_val, 0, sizeof(eth_val));
+
+ eth_val.ether_type = 0x0806;
+ eth_mask.ether_type = 0;
+
+ gp->position = 0;
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L2,
+ &eth_mask, &eth_val, sizeof(struct ether_hdr));
+
+}
+#endif
+
/* Copy Flow Director filter to a VIC generic filter (requires advanced
* filter support.
*/
@@ -146,6 +169,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
+#ifdef TREX_PATCH
+ // important for this to be below 2.
+ // If added with position 2, IPv6 UDP and ICMP seems to be caught by some other rule
+ gp->position = 1;
+#endif
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
struct udp_hdr udp_mask, udp_val;
@@ -344,11 +372,23 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
case -EINVAL:
case -ENOENT:
enic->fdir.stats.f_remove++;
+#ifdef TREX_PATCH
+ return pos;
+#else
return -EINVAL;
+#endif
default:
/* The entry is present in the table */
key = enic->fdir.nodes[pos];
+#ifdef TREX_PATCH
+ switch (params->soft_id) {
+ case 100:
+ // remove promisc when we delete 'receive all' filter
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 0, 1);
+ break;
+ }
+#endif
/* Delete the filter */
vnic_dev_classifier(enic->vdev, CLSF_DEL,
&key->fltr_id, NULL);
@@ -455,8 +495,19 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
- enic->fdir.copy_fltr_fn(&fltr, &params->input,
- &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+#ifdef TREX_PATCH
+ switch (params->soft_id) {
+ case 100:
+ vnic_dev_packet_filter(enic->vdev, 1, 1, 1, 1, 1);
+ copy_fltr_recv_all(&fltr, &params->input, &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+ break;
+ default:
+#endif
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
+#ifdef TREX_PATCH
+ }
+#endif
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
index fce3381a..c0bcfd03 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.c
@@ -908,7 +908,7 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->wqe->mpw.eseg.rsvd2 = 0;
mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
- MLX5_OPCODE_LSO_MPW);
+ MLX5_OPCODE_TSO);
mpw->wqe->mpw.ctrl.data[2] = 0;
mpw->wqe->mpw.ctrl.data[3] = 0;
mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
@@ -1107,7 +1107,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->wqe = &(*txq->wqes)[idx];
mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
- MLX5_OPCODE_LSO_MPW);
+ MLX5_OPCODE_TSO);
mpw->wqe->mpw_inl.ctrl.data[2] = 0;
mpw->wqe->mpw_inl.ctrl.data[3] = 0;
mpw->wqe->mpw_inl.eseg.mss = htons(length);
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index 45c37260..f8f365c8 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -96,6 +96,7 @@ extern "C" {
#define RTE_TEST_RX_DESC_DEFAULT 64
#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)
+#define RTE_TEST_RX_DESC_DEFAULT_MLX 8
#define RTE_TEST_RX_DESC_VM_DEFAULT 512
#define RTE_TEST_TX_DESC_VM_DEFAULT 512
@@ -177,6 +178,15 @@ public:
virtual bool drop_packets_incase_of_linkdown() {
return (false);
}
+
+    /* Mellanox ConnectX-4 can drop only 35MPPS per Rx queue. To work around this issue we create multiple Rx queues and enable RSS (RSS is disabled for queue 1).
+       Returns zero when the workaround is disabled, or the number of Rx queues when it is enabled.
+ */
+
+ virtual uint16_t enable_rss_drop_workaround(void) {
+ return (0);
+ }
+
};
@@ -443,8 +453,8 @@ public:
private:
- virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
- , uint16_t ip_id, uint8_t l4_proto, int queue);
+ virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t id
+ , uint8_t l4_proto, uint8_t tos, int queue);
virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
@@ -493,6 +503,10 @@ public:
virtual CFlowStatParser *get_flow_stat_parser();
virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
+ virtual uint16_t enable_rss_drop_workaround(void) {
+ return (5);
+ }
+
private:
virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
, int queue);
@@ -2615,6 +2629,8 @@ public:
float m_total_rx_pps;
float m_cpu_util;
+ bool m_link_up = true;
+ bool m_link_was_down = false;
};
class CGlobalStats {
@@ -2885,8 +2901,11 @@ void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
fprintf (fd," --------------- \n");
for (i=0; i<(int)port_to_show; i++) {
CPerPortStats * lp=&m_port[i];
- fprintf(fd,"port : %d \n",(int)i);
- fprintf(fd,"------------\n");
+ fprintf(fd,"port : %d ",(int)i);
+ if ( ! lp->m_link_up ) {
+ fprintf(fd," (link DOWN)");
+ }
+ fprintf(fd,"\n------------\n");
#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
GS_DP_A4(opackets);
@@ -2900,7 +2919,13 @@ void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
}else{
fprintf(fd," %10s ","ports");
for (i=0; i<(int)port_to_show; i++) {
- fprintf(fd,"| %15d ",i);
+ CPerPortStats * lp=&m_port[i];
+ if ( lp->m_link_up ) {
+ fprintf(fd,"| %15d ",i);
+ } else {
+ std::string port_with_state = "(link DOWN) " + std::to_string(i);
+ fprintf(fd,"| %15s ",port_with_state.c_str());
+ }
}
fprintf(fd,"\n");
fprintf(fd," -----------------------------------------------------------------------------------------\n");
@@ -3543,6 +3568,7 @@ int CGlobalTRex::ixgbe_start(void){
assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
CPhyEthIF * _if=&m_ports[i];
_if->Create((uint8_t)i);
+ uint16_t rx_rss = get_ex_drv()->enable_rss_drop_workaround();
if ( get_vm_one_queue_enable() ) {
/* VMXNET3 does claim to support 16K but somehow does not work */
@@ -3560,23 +3586,50 @@ int CGlobalTRex::ixgbe_start(void){
} else {
// 2 rx queues.
// TX queues: 1 for each core handling the port pair + 1 for latency pkts + 1 for use by RX core
- _if->configure(2, m_cores_to_dual_ports + 2, &m_port_cfg.m_port_conf);
+
+ uint16_t rx_queues;
+
+ if (rx_rss==0) {
+ rx_queues=2;
+ }else{
+ rx_queues=rx_rss;
+ }
+
+ _if->configure(rx_queues, m_cores_to_dual_ports + 2, &m_port_cfg.m_port_conf);
m_rx_core_tx_q_id = m_cores_to_dual_ports;
- // setup RX drop queue
- _if->rx_queue_setup(MAIN_DPDK_DATA_Q,
- RTE_TEST_RX_DESC_DEFAULT,
- socket_id,
- &m_port_cfg.m_rx_conf,
- CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
- // setup RX filter queue
- _if->set_rx_queue(MAIN_DPDK_RX_Q);
+ if ( rx_rss ) {
+ int j=0;
+ for (j=0;j<rx_rss; j++) {
+ if (j==MAIN_DPDK_RX_Q){
+ continue;
+ }
+ /* drop queue */
+ _if->rx_queue_setup(j,
+ RTE_TEST_RX_DESC_DEFAULT_MLX,
+ socket_id,
+ &m_port_cfg.m_rx_conf,
+ CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
+
+
+ }
+ }else{
+ // setup RX drop queue
+ _if->rx_queue_setup(MAIN_DPDK_DATA_Q,
+ RTE_TEST_RX_DESC_DEFAULT,
+ socket_id,
+ &m_port_cfg.m_rx_conf,
+ CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
+ // setup RX filter queue
+ _if->set_rx_queue(MAIN_DPDK_RX_Q);
+ }
+
_if->rx_queue_setup(MAIN_DPDK_RX_Q,
RTE_TEST_RX_LATENCY_DESC_DEFAULT,
socket_id,
&m_port_cfg.m_rx_conf,
CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
- // setup TX queues
+
for (int qid = 0; qid < m_max_queues_per_port; qid++) {
_if->tx_queue_setup((uint16_t)qid,
RTE_TEST_TX_DESC_DEFAULT ,
@@ -3585,6 +3638,10 @@ int CGlobalTRex::ixgbe_start(void){
}
}
+ if ( rx_rss ){
+ _if->configure_rss_redirect_table(rx_rss,MAIN_DPDK_RX_Q);
+ }
+
_if->stats_clear();
_if->start();
_if->configure_rx_duplicate_rules();
@@ -3986,6 +4043,11 @@ void CGlobalTRex::dump_post_test_stats(FILE *fd){
}
else
fprintf (fd, " Total-pkt-drop : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
+ for (i=0; i<m_max_ports; i++) {
+ if ( m_stats.m_port[i].m_link_was_down ) {
+ fprintf (fd, " WARNING: Link was down at port %d during test (at least for some time)!\n", i);
+ }
+ }
fprintf (fd," Total-tx-bytes : %llu bytes \n", (unsigned long long)pkt_out_bytes);
fprintf (fd," Total-tx-sw-bytes : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
fprintf (fd," Total-rx-bytes : %llu byte \n", (unsigned long long)pkt_in_bytes);
@@ -4101,6 +4163,8 @@ void CGlobalTRex::get_stats(CGlobalStats & stats){
stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
stp->m_total_rx_pps = _if->get_last_rx_pps_rate();
+ stp->m_link_up = _if->get_port_attr()->is_link_up();
+ stp->m_link_was_down |= ! _if->get_port_attr()->is_link_up();
stats.m_total_tx_pkts += st.opackets;
stats.m_total_rx_pkts += st.ipackets;
@@ -4458,18 +4522,20 @@ CGlobalTRex::handle_slow_path() {
m_mg.update();
if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
- switch (m_io_modes.m_l_mode) {
- case CTrexGlobalIoMode::lDISABLE:
- fprintf(stdout,"\n+Latency stats disabled \n");
- break;
- case CTrexGlobalIoMode::lENABLE:
- fprintf(stdout,"\n-Latency stats enabled \n");
- m_mg.DumpShort(stdout);
- break;
- case CTrexGlobalIoMode::lENABLE_Extended:
- fprintf(stdout,"\n-Latency stats extended \n");
- m_mg.Dump(stdout);
- break;
+ if (CGlobalInfo::m_options.m_latency_rate != 0) {
+ switch (m_io_modes.m_l_mode) {
+ case CTrexGlobalIoMode::lDISABLE:
+ fprintf(stdout, "\n+Latency stats disabled \n");
+ break;
+ case CTrexGlobalIoMode::lENABLE:
+ fprintf(stdout, "\n-Latency stats enabled \n");
+ m_mg.DumpShort(stdout);
+ break;
+ case CTrexGlobalIoMode::lENABLE_Extended:
+ fprintf(stdout, "\n-Latency stats extended \n");
+ m_mg.Dump(stdout);
+ break;
+ }
}
if ( get_is_rx_check_mode() ) {
@@ -4872,6 +4938,57 @@ int CGlobalTRex::start_master_statefull() {
////////////////////////////////////////////
static CGlobalTRex g_trex;
+
+void CPhyEthIF::configure_rss_redirect_table(uint16_t numer_of_queues,
+ uint16_t skip_queue){
+
+
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(m_port_id,&dev_info);
+ assert(dev_info.reta_size>0);
+
+ int reta_conf_size =
+ std::max(1, dev_info.reta_size / RTE_RETA_GROUP_SIZE);
+
+ struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
+
+ rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
+
+ int i,j;
+
+ for (j=0; j<reta_conf_size; j++) {
+ uint16_t skip=0;
+ reta_conf[j].mask = ~0ULL;
+ for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
+ uint16_t q;
+ while (true) {
+ q=(i+skip)%numer_of_queues;
+ if (q!=skip_queue) {
+ break;
+ }
+ skip+=1;
+ }
+ reta_conf[j].reta[i]=q;
+ // printf(" %d %d %d \n",j,i,q);
+ }
+ }
+ rte_eth_dev_rss_reta_update(m_port_id,&reta_conf[0],dev_info.reta_size);
+
+ rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
+
+ #if 0
+ /* verification */
+ for (j=0; j<reta_conf_size; j++) {
+ for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
+ printf(" R %d %d %d \n",j,i,reta_conf[j].reta[i]);
+ }
+ }
+ #endif
+
+}
+
+
void CPhyEthIF::update_counters() {
get_ex_drv()->get_extended_stats(this, &m_stats);
CRXCoreIgnoreStat ign_stats;
@@ -6356,11 +6473,11 @@ int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if) {
add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0
, FLOW_STAT_PAYLOAD_IP_ID, IPPROTO_ICMP, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 0
- , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, 0
- , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0
- , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
rte_eth_fdir_stats_reset(_if->get_port_id(), NULL, FDIR_TEMP_HW_ID, 1);
return 0; // Other rules are configured dynamically in stateless
@@ -6560,6 +6677,11 @@ void CTRexExtendedDriverBaseMlnx5G::update_configuration(port_cfg_t * cfg){
cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos=0x01;
cfg->m_port_conf.fdir_conf.mask.ipv6_mask.proto=0xff;
cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc=0x01;
+
+ /* enable RSS */
+ cfg->m_port_conf.rxmode.mq_mode =ETH_MQ_RX_RSS;
+ cfg->m_port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+
}
/*
@@ -6581,7 +6703,7 @@ void CTRexExtendedDriverBaseMlnx5G::add_del_rules(enum rte_filter_op op, uint8_t
memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
#if 0
- printf("40g::%s rules: port:%d type:%d ip_id:%x l4:%d q:%d\n"
+ printf("MLNX add_del_rules::%s rules: port:%d type:%d ip_id:%x l4:%d q:%d\n"
, (op == RTE_ETH_FILTER_ADD) ? "add" : "del"
, port_id, type, ip_id, l4_proto, queue);
#endif
@@ -6597,7 +6719,6 @@ void CTRexExtendedDriverBaseMlnx5G::add_del_rules(enum rte_filter_op op, uint8_t
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- // filter.input.flow.ip4_flow.ttl = ttl;
filter.input.flow.ip4_flow.ip_id = ip_id;
if (l4_proto != 0)
filter.input.flow.ip4_flow.proto = l4_proto;
@@ -6605,7 +6726,6 @@ void CTRexExtendedDriverBaseMlnx5G::add_del_rules(enum rte_filter_op op, uint8_t
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- // filter.input.flow.ipv6_flow.hop_limits=ttl;
filter.input.flow.ipv6_flow.flow_label = ip_id;
filter.input.flow.ipv6_flow.proto = l4_proto;
break;
@@ -6629,6 +6749,7 @@ int CTRexExtendedDriverBaseMlnx5G::set_rcv_all(CPhyEthIF * _if, bool set_on) {
add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 2, 17, MAIN_DPDK_RX_Q);
} else {
add_del_rules(RTE_ETH_FILTER_DELETE, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 2, 17, MAIN_DPDK_RX_Q);
+ add_del_rx_filter_rules(_if, true);
}
return 0;
@@ -6742,30 +6863,13 @@ void CTRexExtendedDriverBaseVIC::update_configuration(port_cfg_t * cfg){
cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
cfg->m_port_conf.rxmode.max_rx_pkt_len =9*1000-10;
-
- if (get_is_stateless()) {
- /* work in TOS mode */
- cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos = 0x01;
- cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc = 0x01;
- }else{
- #ifdef VIC_TTL_FILTER
- cfg->m_port_conf.fdir_conf.mask.ipv4_mask.ttl = 0xff;
- cfg->m_port_conf.fdir_conf.mask.ipv6_mask.hop_limits = 0xff;
- #else
- cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos = 0x01;
- cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc = 0x01;
- #endif
- }
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos = 0x01;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc = 0x01;
}
-
-/* Add rule to send packets with protocol 'type', and ttl 'ttl' to rx queue 1 */
-// ttl is used in statefull mode, and ip_id in stateless. We configure the driver registers so that only one of them applies.
-// So, the rule will apply if packet has either the correct ttl or IP ID, depending if we are in statfull or stateless.
-void CTRexExtendedDriverBaseVIC::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
- , uint16_t ip_id, uint8_t l4_proto, int queue) {
+void CTRexExtendedDriverBaseVIC::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type
+ , uint16_t id, uint8_t l4_proto, uint8_t tos, int queue) {
int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
- static int filter_soft_id = 0;
if ( ret != 0 ){
rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_supported "
@@ -6778,15 +6882,15 @@ void CTRexExtendedDriverBaseVIC::add_del_rules(enum rte_filter_op op, uint8_t po
memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
#if 0
- printf("40g::%s rules: port:%d type:%d ttl:%d ip_id:%x l4:%d q:%d\n"
+ printf("VIC add_del_rules::%s rules: port:%d type:%d id:%d l4:%d tod:%d, q:%d\n"
, (op == RTE_ETH_FILTER_ADD) ? "add" : "del"
- , port_id, type, ttl, ip_id, l4_proto, queue);
+ , port_id, type, id, l4_proto, tos, queue);
#endif
filter.action.rx_queue = queue;
- filter.action.behavior =RTE_ETH_FDIR_ACCEPT;
- filter.action.report_status =RTE_ETH_FDIR_NO_REPORT_STATUS;
- filter.soft_id = filter_soft_id++;
+ filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
+ filter.action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
+ filter.soft_id = id;
filter.input.flow_type = type;
switch (type) {
@@ -6794,22 +6898,22 @@ void CTRexExtendedDriverBaseVIC::add_del_rules(enum rte_filter_op op, uint8_t po
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- filter.input.flow.ip4_flow.tos=ttl;
- filter.input.flow.ip4_flow.ip_id = ip_id;
- if (l4_proto != 0)
- filter.input.flow.ip4_flow.proto = l4_proto;
+ filter.input.flow.ip4_flow.tos = tos;
+ filter.input.flow.ip4_flow.proto = l4_proto;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- filter.input.flow.ipv6_flow.tc=ttl;
- filter.input.flow.ipv6_flow.flow_label = ip_id;
+ filter.input.flow.ipv6_flow.tc = tos;
filter.input.flow.ipv6_flow.proto = l4_proto;
break;
}
ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, op, (void*)&filter);
if ( ret != 0 ) {
+ if (((op == RTE_ETH_FILTER_ADD) && (ret == -EEXIST)) || ((op == RTE_ETH_FILTER_DELETE) && (ret == -ENOENT)))
+ return;
+
rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_ctrl: err=%d, port=%u\n",
ret, port_id);
}
@@ -6828,38 +6932,46 @@ int CTRexExtendedDriverBaseVIC::add_del_eth_type_rule(uint8_t port_id, enum rte_
return ret;
}
-extern "C" int rte_eth_fdir_stats_reset(uint8_t port_id, uint32_t *stats, uint32_t start, uint32_t len);
-
int CTRexExtendedDriverBaseVIC::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
uint32_t port_id = _if->get_port_id();
-#ifndef VIC_TTL_FILTER
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 0x1, 0, 17, MAIN_DPDK_RX_Q); /*TCP/UDP */
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 0x1, 0, 6, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, 0x1, 0, 132, MAIN_DPDK_RX_Q); /*SCTP*/
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0x1, 0, 1, MAIN_DPDK_RX_Q); /*ICMP*/
-
- /* Ipv6*/
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0x1, 0, 0, MAIN_DPDK_RX_Q); /*Any protocol on Ipv6*/
-#else
- uint16_t hops = get_rx_check_hops();
- int i;
- for (i = 0; i < 2; i++) {
- uint8_t ttl = TTL_RESERVE_DUPLICATE - i - hops;
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, ttl, 0, 17, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, ttl, 0, 6, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q);
- /* Rules for latency measurement packets */
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, ttl, 0, IPPROTO_ICMP, MAIN_DPDK_RX_Q);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, ttl, 0, 138, MAIN_DPDK_RX_Q);
- }
-#endif
+
+ set_rcv_all(_if, false);
+
+ // Rules to direct all IP packets with tos lsb bit 1 to RX Q.
+ // IPv4
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 1, 17, 0x1, MAIN_DPDK_RX_Q);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 1, 6, 0x1, MAIN_DPDK_RX_Q);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, 1, 132, 0x1, MAIN_DPDK_RX_Q); /*SCTP*/
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 1, 1, 0x1, MAIN_DPDK_RX_Q); /*ICMP*/
+ // Ipv6
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 1, 6, 0x1, MAIN_DPDK_RX_Q);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 1, 17, 0x1, MAIN_DPDK_RX_Q);
+
+ // Because of some issue with VIC firmware, IPv6 UDP and ICMP go by default to q 1, so we
+ // need these rules to make them go to q 0.
+ // This rule applies to all packets with 0 in the tos lsb.
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 1, 6, 0, MAIN_DPDK_DATA_Q);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 1, 17, 0, MAIN_DPDK_DATA_Q);
return 0;
}
+int CTRexExtendedDriverBaseVIC::set_rcv_all(CPhyEthIF * _if, bool set_on) {
+ uint8_t port_id = _if->get_rte_port_id();
+
+ // soft ID 100 tells VIC driver to add rule for all ether types.
+ // Added with highest priority (implicitly in the driver), so if it exists, it applies before all other rules
+ if (set_on) {
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 100, 30, 0, MAIN_DPDK_RX_Q);
+ } else {
+ add_del_rules(RTE_ETH_FILTER_DELETE, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 100, 30, 0, MAIN_DPDK_RX_Q);
+ }
+
+ return 0;
+
+}
+
void CTRexExtendedDriverBaseVIC::clear_extended_stats(CPhyEthIF * _if){
rte_eth_stats_reset(_if->get_port_id());
}
@@ -6892,7 +7004,6 @@ void CTRexExtendedDriverBaseVIC::get_extended_stats(CPhyEthIF * _if,CPhyEthIFSta
prev_stats->rx_nombuf = stats1.rx_nombuf;
}
-
int CTRexExtendedDriverBaseVIC::verify_fw_ver(int port_id) {
struct rte_eth_fdir_info fdir_info;
@@ -6913,7 +7024,6 @@ int CTRexExtendedDriverBaseVIC::verify_fw_ver(int port_id) {
exit(1);
}
-
int CTRexExtendedDriverBaseVIC::configure_rx_filter_rules(CPhyEthIF * _if) {
if (get_is_stateless()) {
@@ -6941,18 +7051,12 @@ int CTRexExtendedDriverBaseVIC::dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd
return (0);
}
-
CFlowStatParser *CTRexExtendedDriverBaseVIC::get_flow_stat_parser() {
CFlowStatParser *parser = new CFlowStatParser();
assert (parser);
return parser;
}
-int CTRexExtendedDriverBaseVIC::set_rcv_all(CPhyEthIF * _if, bool set_on) {
- //printf(" NOT supported yet \n");
- return 0;
-}
-
/////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/main_dpdk.h b/src/main_dpdk.h
index 6402d106..25b19471 100644
--- a/src/main_dpdk.h
+++ b/src/main_dpdk.h
@@ -109,6 +109,9 @@ class CPhyEthIF {
void dump_stats(FILE *fd);
void set_ignore_stats_base(CPreTestStats &pre_stats);
void update_counters();
+ void configure_rss_redirect_table(uint16_t numer_of_queues,
+ uint16_t skip_queue);
+
void stats_clear();
uint8_t get_port_id(){
return (m_port_id);
diff --git a/src/rpc-server/trex_rpc_jsonrpc_v2_parser.cpp b/src/rpc-server/trex_rpc_jsonrpc_v2_parser.cpp
index 4fa2447d..d08de4e1 100644
--- a/src/rpc-server/trex_rpc_jsonrpc_v2_parser.cpp
+++ b/src/rpc-server/trex_rpc_jsonrpc_v2_parser.cpp
@@ -235,3 +235,12 @@ TrexJsonRpcV2Parser::generate_common_error(Json::Value &json, const std::string
}
+void
+TrexJsonRpcV2Parser::generate_common_error(std::string &response, const std::string &specific_err) {
+ Json::Value resp_json;
+ Json::FastWriter writer;
+
+ generate_common_error(resp_json, specific_err);
+ response = writer.write(resp_json);
+}
+
diff --git a/src/rpc-server/trex_rpc_jsonrpc_v2_parser.h b/src/rpc-server/trex_rpc_jsonrpc_v2_parser.h
index 0563f21d..d91cbe2d 100644
--- a/src/rpc-server/trex_rpc_jsonrpc_v2_parser.h
+++ b/src/rpc-server/trex_rpc_jsonrpc_v2_parser.h
@@ -89,6 +89,15 @@ public:
static void generate_common_error(Json::Value &json, const std::string &specific_err);
/**
+ * will generate a valid JSON RPC v2 error message with
+ * generic error code and message
+ *
+ * @author imarom (16-Sep-15)
+ *
+ */
+ static void generate_common_error(std::string &response, const std::string &specific_err);
+
+ /**
* *tries* to generate a pretty string from JSON
* if json_str is not a valid JSON string
* it will duplicate the source
diff --git a/src/rpc-server/trex_rpc_req_resp_server.cpp b/src/rpc-server/trex_rpc_req_resp_server.cpp
index 28bf1d80..e762b8c1 100644
--- a/src/rpc-server/trex_rpc_req_resp_server.cpp
+++ b/src/rpc-server/trex_rpc_req_resp_server.cpp
@@ -171,7 +171,12 @@ void TrexRpcServerReqRes::_stop_rpc_thread() {
void TrexRpcServerReqRes::handle_request(const std::string &request) {
std::string response;
- process_request(request, response);
+ if ( request.size() > MAX_RPC_MSG_LEN ) {
+ std::string err_msg = "Request is too large (" + std::to_string(request.size()) + " bytes). Consider splitting to smaller chunks.";
+ TrexJsonRpcV2Parser::generate_common_error(response, err_msg);
+ } else {
+ process_request(request, response);
+ }
zmq_send(m_socket, response.c_str(), response.size(), 0);
}
@@ -244,7 +249,12 @@ void TrexRpcServerReqRes::process_zipped_request(const std::string &request, std
/* process the request */
std::string raw_response;
- process_request_raw(unzipped, raw_response);
+ if ( unzipped.size() > MAX_RPC_MSG_LEN ) {
+ std::string err_msg = "Request is too large (" + std::to_string(unzipped.size()) + " bytes). Consider splitting to smaller chunks.";
+ TrexJsonRpcV2Parser::generate_common_error(raw_response, err_msg);
+ } else {
+ process_request_raw(unzipped, raw_response);
+ }
TrexRpcZip::compress(raw_response, response);
@@ -256,18 +266,14 @@ void TrexRpcServerReqRes::process_zipped_request(const std::string &request, std
*/
void
TrexRpcServerReqRes::handle_server_error(const std::string &specific_err) {
- Json::FastWriter writer;
- Json::Value response;
+ std::string response;
/* generate error */
TrexJsonRpcV2Parser::generate_common_error(response, specific_err);
- /* write the JSON to string and sever on ZMQ */
- std::string response_str = writer.write(response);
-
- verbose_json("Server Replied: ", response_str);
+ verbose_json("Server Replied: ", response);
- zmq_send(m_socket, response_str.c_str(), response_str.size(), 0);
+ zmq_send(m_socket, response.c_str(), response.size(), 0);
}
diff --git a/src/rpc-server/trex_rpc_req_resp_server.h b/src/rpc-server/trex_rpc_req_resp_server.h
index 92d51a2a..9a994044 100644
--- a/src/rpc-server/trex_rpc_req_resp_server.h
+++ b/src/rpc-server/trex_rpc_req_resp_server.h
@@ -53,6 +53,7 @@ protected:
void *m_context;
void *m_socket;
+ static const uint32_t MAX_RPC_MSG_LEN = 999999;
};
/**